text | id | metadata | __index_level_0__
---|---|---|---|
stringlengths 17–362k | stringlengths 13–115 | dict | int64 0–75
# TODO rename file
from enum import Enum
from typing import Callable
import ivy
import importlib
class BackendHandlerMode(Enum):
WithBackend = 0
SetBackend = 1
class WithBackendContext:
def __init__(self, backend, cached=True) -> None:
self.backend = backend
self.cached = cached
def __enter__(self):
return ivy.with_backend(self.backend, cached=self.cached)
def __exit__(self, exc_type, exc_val, exc_tb):
return
update_backend: Callable = ivy.utils.backend.ContextManager
# update_backend: Callable = WithBackendContext
class BackendHandler:
_context = WithBackendContext
_ctx_flag = 0 # BackendHandlerMode configs
@classmethod
def _update_context(cls, mode: BackendHandlerMode):
if mode == BackendHandlerMode.WithBackend:
cls._context = WithBackendContext
cls._ctx_flag = 0
elif mode == BackendHandlerMode.SetBackend:
cls._context = ivy.utils.backend.ContextManager
cls._ctx_flag = 1
else:
raise ValueError(f"Unknown backend handler mode! {mode}")
@classmethod
def update_backend(cls, backend):
return cls._context(backend)
def get_frontend_config(frontend: str):
config_module = importlib.import_module(
f"ivy_tests.test_ivy.test_frontends.config.{frontend}"
)
return config_module.get_config()
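# Illustrative usage sketch (added for exposition; not part of the original
# module, and it assumes the "numpy" backend is installed):
# `BackendHandler.update_backend` returns a context manager, and the
# backend-specific module only exists inside the `with` block.
if __name__ == "__main__":
    with BackendHandler.update_backend("numpy") as ivy_backend:
        print(ivy_backend.to_numpy(ivy_backend.asarray([1.0, 2.0])))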
| ivy/ivy_tests/test_ivy/helpers/pipeline_helper.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/pipeline_helper.py",
"repo_id": "ivy",
"token_count": 565
} | 49 |
from .base import FrontendConfig, SupportedDtypes, SupportedDeviecs
import ivy
def get_config():
return TorchVisionFrontendConfig()
class TorchVisionFrontendConfig(FrontendConfig):
backend = ivy.with_backend("torch")
valid_devices = ["cpu", "gpu"]
invalid_devices = ["tpu"]
valid_dtypes = [
"int16",
"int32",
"int64",
"uint8",
"float16",
"float32",
"float64",
]
invalid_dtypes = [
"int8",
"uint16",
"uint32",
"uint64",
"bfloat16",
"complex64",
"complex128",
"bool",
]
valid_numeric_dtypes = [
"int16",
"int32",
"int64",
"uint8",
"float16",
"float32",
"float64",
]
invalid_numeric_dtypes = [
"int8",
"uint16",
"uint32",
"uint64",
"bfloat16",
"complex64",
"complex128",
"bool",
]
valid_int_dtypes = [
"int16",
"int32",
"int64",
"uint8",
]
invalid_int_dtypes = [
"int8",
"uint16",
"uint32",
"uint64",
]
valid_uint_dtypes = [
"uint8",
]
invalid_uint_dtypes = [
"uint16",
"uint32",
"uint64",
]
valid_float_dtypes = [
"float16",
"float32",
"float64",
]
invalid_float_dtypes = [
"bfloat16",
]
valid_complex_dtypes = []
invalid_complex_dtypes = [
"complex64",
"complex128",
]
@property
def supported_devices(self):
return SupportedDeviecs(
valid_devices=self.valid_devices, invalid_devices=self.invalid_devices
)
@property
def supported_dtypes(self):
return SupportedDtypes(
valid_dtypes=self.valid_dtypes,
invalid_dtypes=self.invalid_dtypes,
valid_numeric_dtypes=self.valid_numeric_dtypes,
invalid_numeric_dtypes=self.invalid_numeric_dtypes,
valid_int_dtypes=self.valid_int_dtypes,
invalid_int_dtypes=self.invalid_int_dtypes,
valid_uint_dtypes=self.valid_uint_dtypes,
invalid_uint_dtypes=self.invalid_uint_dtypes,
valid_float_dtypes=self.valid_float_dtypes,
invalid_float_dtypes=self.invalid_float_dtypes,
valid_complex_dtypes=self.valid_complex_dtypes,
invalid_complex_dtypes=self.invalid_complex_dtypes,
)
@property
def Dtype(self):
return self.backend.Dtype
@property
def Device(self):
return self.backend.Device
def native_array(self, x):
return self.backend.native_array(x)
def is_native_array(self, x):
return self.backend.is_native_array(x)
def to_numpy(self, x):
return self.backend.to_numpy(x)
def as_native_dtype(self, dtype: str):
return self.backend.as_native_dtype(dtype)
def as_native_device(self, device: str):
return self.backend.as_native_dev(device)
def isscalar(self, x):
return self.backend.isscalar(x)
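# Illustrative check (added for exposition; not part of the original file).
# It assumes SupportedDtypes and SupportedDeviecs expose their constructor
# arguments as attributes, which is how the tests consume them.
if __name__ == "__main__":
    config = get_config()
    assert "float32" in config.supported_dtypes.valid_dtypes
    assert "tpu" in config.supported_devices.invalid_devices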
| ivy/ivy_tests/test_ivy/test_frontends/config/torchvision.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/torchvision.py",
"repo_id": "ivy",
"token_count": 1601
} | 50 |
# global
import pytest
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="mindspore.numpy.array",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
ndmin=st.integers(min_value=0, max_value=5),
copy=st.booleans(),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_mindspore_array(
dtype_and_a,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
copy,
ndmin,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
object=a,
dtype=None,
copy=copy,
ndmin=ndmin,
)
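# Self-contained sketch (hypothesis only; every name below is illustrative)
# of the draw-then-unpack pattern used above: a composite strategy yields a
# (dtypes, arrays) pair which the test body unpacks before calling the
# pipeline.
from hypothesis import given
@st.composite
def _dtype_and_values_sketch(draw):
    dtype = draw(st.sampled_from(["int32", "float32"]))
    values = draw(st.lists(st.integers(-5, 5), min_size=1, max_size=5))
    return [dtype], [values]
@given(dtype_and_a=_dtype_and_values_sketch())
def test_unpack_pattern_sketch(dtype_and_a):
    dtype, a = dtype_and_a
    assert len(dtype) == 1 and len(a) == 1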
| ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/test_numpy.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/test_numpy.py",
"repo_id": "ivy",
"token_count": 557
} | 51 |
# global
import sys
import numpy as np
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_dtype_and_matrix,
)
# cholesky
@handle_frontend_test(
fn_tree="numpy.linalg.cholesky",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
).filter(
lambda x: np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
and np.linalg.det(x[1][0]) != 0
),
test_with_out=st.just(False),
)
def test_numpy_cholesky(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
x = x[0]
x = (
np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
) # make symmetric positive-definite
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
a=x,
)
# qr
@handle_frontend_test(
fn_tree="numpy.linalg.qr",
dtype_and_x=_get_dtype_and_matrix(),
mode=st.sampled_from(("reduced", "complete")),
test_with_out=st.just(False),
)
def test_numpy_qr(
dtype_and_x,
mode,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
a=x,
mode=mode,
)
# svd
@handle_frontend_test(
fn_tree="numpy.linalg.svd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.1,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
),
full_matrices=st.booleans(),
compute_uv=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_svd(
dtype_and_x,
full_matrices,
compute_uv,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
x = x[0]
x = (
np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
) # make symmetric positive-definite
ret, ret_gt = helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
test_values=False,
fn_tree=fn_tree,
on_device=on_device,
a=x,
full_matrices=full_matrices,
compute_uv=compute_uv,
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for u, v in zip(ret, ret_gt):
u = ivy_backend.to_numpy(ivy_backend.abs(u))
v = ivy_backend.to_numpy(ivy_backend.abs(v))
helpers.value_test(
ret_np_flat=u,
ret_np_from_gt_flat=v,
rtol=1e-04,
atol=1e-04,
backend=backend_fw,
ground_truth_backend=frontend,
)
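# Worked sketch (illustrative) of the symmetrization trick used twice above:
# for any real square A, A.T @ A is symmetric positive semi-definite, and
# adding a small multiple of the identity makes it positive-definite, so
# np.linalg.cholesky succeeds on it.
_a = np.random.default_rng(0).standard_normal((4, 4))
_spd = np.matmul(_a.T, _a) + np.identity(4) * 1e-3
_lower = np.linalg.cholesky(_spd)
assert np.allclose(np.matmul(_lower, _lower.T), _spd)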
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_decompositions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_decompositions.py",
"repo_id": "ivy",
"token_count": 1802
} | 52 |
# global
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_elementwise import ( # noqa
ldexp_args,
)
# exp
@handle_frontend_test(
fn_tree="numpy.exp",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="exp"
),
)
def test_numpy_exp(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# exp2
@handle_frontend_test(
fn_tree="numpy.exp2",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="exp2"
),
)
def test_numpy_exp2(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# expm1
@handle_frontend_test(
fn_tree="numpy.expm1",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="expm1"
),
)
def test_numpy_expm1(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# frexp
@handle_frontend_test(
fn_tree="numpy.frexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
num_arrays=1,
shared_dtype=True,
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="frexp"
),
)
def test_numpy_frexp(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# i0
@handle_frontend_test(
fn_tree="numpy.i0",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
)
def test_numpy_i0(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# ldexp
@handle_frontend_test(
fn_tree="numpy.ldexp",
dtype_and_x=ldexp_args(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="ldexp"
),
)
def test_numpy_ldexp(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# log
@handle_frontend_test(
fn_tree="numpy.log",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=2,
safety_factor_scale="log",
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="log"
),
)
def test_numpy_log(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# log10
@handle_frontend_test(
fn_tree="numpy.log10",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="log10"
),
)
def test_numpy_log10(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# log1p
@handle_frontend_test(
fn_tree="numpy.log1p",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=2,
safety_factor_scale="log",
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="log1p"
),
)
def test_numpy_log1p(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-3,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# log2
@handle_frontend_test(
fn_tree="numpy.log2",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=2,
safety_factor_scale="linear",
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="log2"
),
)
def test_numpy_log2(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-3,
atol=1e-3,
x=x[0],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# logaddexp
@handle_frontend_test(
fn_tree="numpy.logaddexp",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="logaddexp"
),
)
def test_numpy_logaddexp(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-3,
atol=1e-3,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# logaddexp2
@handle_frontend_test(
fn_tree="numpy.logaddexp2",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_value=-100,
max_value=100,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="logaddexp2"
),
)
def test_numpy_logaddexp2(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
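# Illustrative sketch (local numpy import; not part of the original file) of
# the ufunc keyword semantics threaded through every test above: with
# `where`, only the selected positions are written, and the untouched
# positions keep whatever the `out` buffer already held.
import numpy as np
_x = np.array([1.0, 2.0, 3.0])
_out = np.zeros_like(_x)
np.exp(_x, out=_out, where=np.array([True, False, True]))
assert _out[1] == 0.0  # masked position left untouched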
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_exponents_and_logarithms.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_exponents_and_logarithms.py",
"repo_id": "ivy",
"token_count": 7314
} | 53 |
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_method
CLASS_TREE = "ivy.functional.frontends.numpy.random.Generator"
# multinomial
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.random.Generator",
method_name="multinomial",
n=helpers.ints(min_value=2, max_value=10),
dtype=helpers.get_dtypes("float", full=False),
size=st.tuples(
st.integers(min_value=1, max_value=10), st.integers(min_value=2, max_value=2)
),
)
def test_numpy_multinomial(
n,
dtype,
on_device,
size,
init_flags,
method_flags,
frontend_method_data,
frontend,
backend_fw,
):
helpers.test_frontend_method(
init_input_dtypes=dtype,
init_flags=init_flags,
backend_to_test=backend_fw,
method_flags=method_flags,
init_all_as_kwargs_np={
            # TODO: fix this temporary placeholder for BitGenerator
"bit_generator": np.random.PCG64(),
},
method_input_dtypes=dtype,
method_all_as_kwargs_np={
"n": n,
"pvals": np.array([1 / n] * n, dtype=dtype[0]),
"size": size,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
test_values=False,
on_device=on_device,
)
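# Illustrative sketch of the reference behaviour under test: `pvals` must
# sum to 1 (hence the uniform [1 / n] * n above), and `size` prepends batch
# dimensions to each length-n draw.
_rng = np.random.Generator(np.random.PCG64(0))
_draws = _rng.multinomial(4, pvals=[0.25] * 4, size=(2, 2))
assert _draws.shape == (2, 2, 4)
assert int(_draws.sum(axis=-1).min()) == 4  # every draw distributes n trials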
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_Generator.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_Generator.py",
"repo_id": "ivy",
"token_count": 670
} | 54 |
# global
from hypothesis import given
import pytest
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import ivy.functional.frontends.onnx as onnx
import ivy.functional.frontends.torch as torch
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="onnx.Abs",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", full=False),
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_onnx_abs(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", prune_function=False),
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
)
)
def test_onnx_abs_v2(dtype_x):
_, data = dtype_x
x_onnx = onnx.Tensor(data[0])
x_torch = torch.Tensor(data[0])
onnx_abs = onnx.abs(x_onnx)
torch_abs = torch.abs(x_torch)
ret = helpers.flatten_and_to_np(ret=onnx_abs)
ret_gt = helpers.flatten_and_to_np(ret=torch_abs)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
ground_truth_backend="torch",
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="onnx.Acos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_onnx_acos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", prune_function=False),
).filter(lambda x: "float16" not in x[0]),
)
def test_onnx_acos_v2(dtype_x):
_, data = dtype_x
x_onnx = onnx.Tensor(data[0])
x_torch = torch.Tensor(data[0])
onnx_acos = onnx.acos(x_onnx)
torch_acos = torch.acos(x_torch)
ret = helpers.flatten_and_to_np(ret=onnx_acos)
ret_gt = helpers.flatten_and_to_np(ret=torch_acos)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
ground_truth_backend="tensorflow",
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="onnx.Acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_onnx_acosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", prune_function=False),
).filter(lambda x: "float16" not in x[0]),
)
def test_onnx_acosh_v2(dtype_x):
_, data = dtype_x
x_onnx = onnx.Tensor(data[0])
x_torch = torch.Tensor(data[0])
onnx_acosh = onnx.acosh(x_onnx)
torch_acosh = torch.acosh(x_torch)
ret = helpers.flatten_and_to_np(ret=onnx_acosh)
ret_gt = helpers.flatten_and_to_np(ret=torch_acosh)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
ground_truth_backend="tensorflow",
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="onnx.Add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
alpha=st.integers(min_value=1, max_value=5),
)
def test_onnx_add(
*,
dtype_and_x,
alpha,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=x[0],
other=x[1],
alpha=alpha,
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", prune_function=False),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_onnx_add_v2(dtype_x):
_, data = dtype_x
x_onnx_1 = onnx.Tensor(data[0])
x_onnx_2 = onnx.Tensor(data[1])
x_torch_1 = torch.Tensor(data[0])
x_torch_2 = torch.Tensor(data[1])
onnx_add = onnx.add(x_onnx_1, x_onnx_2)
torch_add = torch.add(x_torch_1, x_torch_2)
ret = helpers.flatten_and_to_np(ret=onnx_add)
ret_gt = helpers.flatten_and_to_np(ret=torch_add)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
ground_truth_backend="tensorflow",
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@handle_frontend_test(
fn_tree="onnx.Asin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_onnx_asin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", prune_function=False),
).filter(lambda x: "float16" not in x[0]),
)
def test_onnx_asin_v2(dtype_x):
_, data = dtype_x
x_onnx = onnx.Tensor(data[0])
x_torch = torch.Tensor(data[0])
onnx_asin = onnx.asin(x_onnx)
torch_asin = torch.asin(x_torch)
ret = helpers.flatten_and_to_np(ret=onnx_asin)
ret_gt = helpers.flatten_and_to_np(ret=torch_asin)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
ground_truth_backend="tensorflow",
)
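# Illustrative sketch (local numpy import; stand-in values, no ivy) of the
# flatten-and-compare pattern the *_v2 tests above follow: compute the same
# op through two routes, flatten both to numpy, and compare within a
# tolerance.
import numpy as np
_data = np.linspace(-1.0, 1.0, 5)
_ret = np.abs(_data)            # stand-in for the onnx frontend result
_ret_gt = np.abs(_data.copy())  # stand-in for the torch ground truth
np.testing.assert_allclose(_ret.ravel(), _ret_gt.ravel(), rtol=1e-4)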
| ivy/ivy_tests/test_ivy/test_frontends/test_onnx/test_elementwise.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_onnx/test_elementwise.py",
"repo_id": "ivy",
"token_count": 3621
} | 55 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _chw_image_shape_helper(draw):
c = draw(st.sampled_from([1, 3]), label="channel")
h = draw(helpers.ints(min_value=1, max_value=100), label="height")
w = draw(helpers.ints(min_value=1, max_value=100), label="width")
shape = (c, h, w)
return shape
# --- Main --- #
# ------------ #
# adjust_brightness
@handle_frontend_test(
fn_tree="paddle.vision.transforms.adjust_brightness",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=_chw_image_shape_helper(),
),
brightness_factor=helpers.floats(min_value=0),
)
def test_paddle_adjust_brightness(
*,
dtype_and_x,
brightness_factor,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
img=x[0],
brightness_factor=brightness_factor,
)
# adjust_hue
@handle_frontend_test(
fn_tree="paddle.vision.transforms.adjust_hue",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=0,
max_value=255,
min_num_dims=3,
max_num_dims=3,
min_dim_size=3,
max_dim_size=3,
),
hue_factor=helpers.floats(min_value=-0.5, max_value=0.5),
)
def test_paddle_adjust_hue(
*,
dtype_and_x,
hue_factor,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-3,
atol=1e-3,
on_device=on_device,
img=x[0],
hue_factor=hue_factor,
)
# hflip
@handle_frontend_test(
fn_tree="paddle.vision.transforms.hflip",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=0,
min_num_dims=3,
max_num_dims=3,
min_dim_size=3,
max_dim_size=3,
),
)
def test_paddle_hflip(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
backend_to_test=backend_fw,
img=x[0],
)
# pad
@handle_frontend_test(
fn_tree="paddle.vision.transforms.pad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=3,
max_num_dims=3,
min_dim_size=3,
max_dim_size=5,
),
padding=st.one_of(
st.integers(min_value=0, max_value=2),
st.tuples(
st.integers(min_value=0, max_value=2), st.integers(min_value=0, max_value=2)
),
st.tuples(
st.integers(min_value=0, max_value=2),
st.integers(min_value=0, max_value=2),
st.integers(min_value=0, max_value=2),
st.integers(min_value=0, max_value=2),
),
),
fill=st.integers(min_value=-5, max_value=5),
padding_mode=st.sampled_from(["constant", "edge", "reflect"]),
)
def test_paddle_pad(
*,
dtype_and_x,
padding,
fill,
padding_mode,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
img=x[0],
padding=padding,
fill=fill,
padding_mode=padding_mode,
backend_to_test=backend_fw,
)
# to_tensor
@handle_frontend_test(
fn_tree="paddle.to_tensor",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_paddle_to_tensor(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
pic=input[0],
)
@handle_frontend_test(
fn_tree="paddle.vision.transforms.vflip",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=3,
max_num_dims=4,
),
)
def test_paddle_vflip(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
img=x[0],
backend_to_test=backend_fw,
)
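# Illustrative sketch (local numpy import) of the three padding modes
# sampled in test_paddle_pad above, shown on a 1-D example.
import numpy as np
_x = np.array([1, 2, 3])
assert list(np.pad(_x, 1, mode="constant", constant_values=5)) == [5, 1, 2, 3, 5]
assert list(np.pad(_x, 1, mode="edge")) == [1, 1, 2, 3, 3]
assert list(np.pad(_x, 1, mode="reflect")) == [2, 1, 2, 3, 2]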
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_vision/test_transforms.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_vision/test_transforms.py",
"repo_id": "ivy",
"token_count": 2784
} | 56 |
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@handle_frontend_test(
fn_tree="sklearn.datasets.make_circles",
n_samples=helpers.ints(min_value=1, max_value=10),
)
def test_sklearn_make_circles(
n_samples,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
n_samples=n_samples,
input_dtypes=["int32"],
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
test_values=False,
)
@handle_frontend_test(
fn_tree="sklearn.datasets.make_moons",
n_samples=helpers.ints(min_value=1, max_value=5),
)
def test_sklearn_make_moons(
n_samples,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
n_samples=n_samples,
input_dtypes=["int32"],
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
test_values=False,
)
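# Illustrative sketch of the reference behaviour under test (assumes
# scikit-learn is installed): make_circles returns an (n_samples, 2) point
# array plus a binary label vector, and make_moons has the same contract.
from sklearn.datasets import make_circles
_X, _y = make_circles(n_samples=8)
assert _X.shape == (8, 2) and set(_y) <= {0, 1}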
| ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_datasets/test_samples_generators.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_datasets/test_samples_generators.py",
"repo_id": "ivy",
"token_count": 575
} | 57 |
# global
from hypothesis import assume, strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
)
from ivy_tests.test_ivy.test_functional.test_nn.test_layers import (
_assume_tf_dilation_gt_1,
_output_shape,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _average_pool_args(draw):
dims = draw(st.integers(min_value=1, max_value=3))
data_formats = ["NWC", "NHWC", "NDHWC"]
data_format = data_formats[dims - 1]
return (
draw(
helpers.arrays_for_pooling(
min_dims=dims + 2, max_dims=dims + 2, min_side=1, max_side=4
)
),
data_format,
)
# sufficient_statistics
@st.composite
def _axes_value(draw):
s = draw(
helpers.get_shape(
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
)
)
dtype_and_x = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
shape=s,
valid_axis=True,
force_tuple_axis=True,
)
)
return dtype_and_x
@st.composite
def _batch_normalization_helper(draw):
shape1, shape2, shape3, shape4 = draw(helpers.mutually_broadcastable_shapes(4))
shape = helpers.broadcast_shapes(shape1, shape2, shape3, shape4)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
max_value=999,
min_value=-1001,
)
)
_, mean = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape1,
min_value=-1001,
max_value=999,
)
)
_, variance = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape2,
min_value=0,
max_value=999,
)
)
_, offset = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape3,
min_value=-1001,
max_value=999,
)
)
_, scale = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=shape4,
min_value=-1001,
max_value=999,
)
)
return x_dtype, x[0], mean[0], variance[0], offset[0], scale[0]
@st.composite
def _dropout_helper(draw):
shape = draw(helpers.get_shape(min_num_dims=1))
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=shape,
)
)
noise_shape = list(shape)
if draw(st.booleans()):
noise_shape = None
else:
for i, _ in enumerate(noise_shape):
if draw(st.booleans()):
noise_shape[i] = 1
elif draw(st.booleans()):
noise_shape[i] = None
seed = draw(helpers.ints(min_value=0, max_value=100))
rate = draw(helpers.floats(min_value=0, max_value=0.9))
return (
dtype_and_x,
noise_shape,
seed,
rate,
)
@st.composite
def _generate_bias_data(draw, keras_backend_fn=False):
data_format = draw(st.sampled_from(["NC...", "N...C", None]))
channel_dim = 1 if data_format == "NC..." else -1
if keras_backend_fn:
data_format = {"NC...": "channels_first", "N...C": "channels_last", None: None}[
data_format
]
dtype, value, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=3,
ret_shape=True,
)
)
channel_size = shape[channel_dim]
bias = draw(helpers.array_values(dtype=dtype[0], shape=(channel_size,)))
return data_format, dtype, value, bias
# Normalize Moments
@st.composite
def _normalize_moments_helper(draw):
shape1, shape2, shape3 = draw(helpers.mutually_broadcastable_shapes(3))
counts_dtype, counts = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
max_value=999,
min_value=-1001,
max_num_dims=1,
max_dim_size=1,
min_dim_size=1,
)
)
_, mean = draw(
helpers.dtype_and_values(
available_dtypes=counts_dtype,
shape=shape1,
min_value=1,
max_num_dims=1,
max_dim_size=1,
min_dim_size=1,
)
)
_, variance = draw(
helpers.dtype_and_values(
available_dtypes=counts_dtype,
shape=shape2,
min_value=1,
max_num_dims=1,
max_dim_size=1,
min_dim_size=1,
)
)
_, shift = draw(
helpers.dtype_and_values(
available_dtypes=counts_dtype,
shape=shape3,
min_value=1,
max_num_dims=1,
max_dim_size=1,
min_dim_size=1,
)
)
return counts_dtype, counts[0], mean[0], variance[0], shift[0]
@st.composite
def _pool_args(draw):
dims = draw(st.integers(min_value=3, max_value=5))
data_formats = {3: "NWC", 4: "NHWC", 5: "NDHWC"}
data_format = data_formats[dims]
pooling_type = draw(st.one_of(st.just("AVG"), st.just("MAX")))
return (
draw(
helpers.arrays_for_pooling(
min_dims=dims,
max_dims=dims,
min_side=1,
max_side=4,
return_dilation=True,
)
),
data_format,
pooling_type,
dims,
)
@st.composite
def _x_and_filters(
draw,
dtypes,
data_format,
padding=None,
stride_min=1,
stride_max=4,
dilation_min=1,
dilation_max=4,
type: str = "2d",
transpose=False,
atrous=False,
):
data_format = draw(data_format)
dtype = draw(dtypes)
if type is not None:
if "1" in type:
dim = 1
elif "2" in type:
dim = 2
elif "3" in type:
dim = 3
elif type in ["depthwise", "separable"]:
dim = 2
else:
dim = len(data_format) - 2
if padding is None:
padding = st.one_of(
st.lists(
st.tuples(
st.integers(min_value=0, max_value=3),
st.integers(min_value=0, max_value=3),
),
min_size=dim,
max_size=dim,
),
st.sampled_from(["SAME", "VALID"]),
)
padding = draw(padding)
if atrous:
dilations = draw(st.integers(dilation_min, dilation_max))
else:
dilations = draw(
st.one_of(
st.integers(dilation_min, dilation_max),
st.lists(
st.integers(dilation_min, dilation_max), min_size=dim, max_size=dim
),
)
)
fdilations = [dilations] * dim if isinstance(dilations, int) else dilations
if atrous:
stride = 1
elif type in ["depthwise", "separable"]:
        # if any value in dilations is greater than 1, tensorflow implements
        # depthwise_conv2d as an atrous depthwise convolution, in which case
        # all values in strides must be equal to 1.
if any(x > 1 for x in fdilations):
stride = 1
else:
stride = draw(st.integers(stride_min, stride_max))
else:
stride = draw(
st.one_of(
st.integers(stride_min, stride_max),
st.lists(
st.integers(stride_min, stride_max), min_size=dim, max_size=dim
),
)
)
fstride = [stride] * dim if isinstance(stride, int) else stride
if dim == 1:
if not transpose:
filter_shape = draw(
st.tuples(
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=1, max_value=3),
helpers.ints(min_value=1, max_value=3),
)
)
min_x_width = filter_shape[0] + (filter_shape[0] - 1) * (fdilations[0] - 1)
else:
filter_shape = draw(
st.tuples(
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=1, max_value=3),
helpers.ints(min_value=1, max_value=3),
)
)
min_x_width = 1
if transpose:
d_in = filter_shape[2]
else:
d_in = filter_shape[1]
if data_format == "NWC":
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=min_x_width, max_value=100),
helpers.ints(min_value=d_in, max_value=d_in),
)
)
else:
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=d_in, max_value=d_in),
helpers.ints(min_value=min_x_width, max_value=100),
)
)
elif dim == 2:
min_x_height = 1
min_x_width = 1
filter_shape = draw(
st.tuples(
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=1, max_value=3),
helpers.ints(min_value=1, max_value=3),
)
)
if not transpose:
min_x_height = filter_shape[0] + (filter_shape[0] - 1) * (fdilations[0] - 1)
min_x_width = filter_shape[1] + (filter_shape[1] - 1) * (fdilations[1] - 1)
if transpose:
d_in = filter_shape[3]
else:
d_in = filter_shape[2]
if data_format == "NHWC":
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=min_x_height, max_value=100),
helpers.ints(min_value=min_x_width, max_value=100),
helpers.ints(min_value=d_in, max_value=d_in),
)
)
else:
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=d_in, max_value=d_in),
helpers.ints(min_value=min_x_height, max_value=100),
helpers.ints(min_value=min_x_width, max_value=100),
)
)
elif dim == 3:
filter_shape = draw(
st.tuples(
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=3, max_value=5),
helpers.ints(min_value=1, max_value=3),
helpers.ints(min_value=1, max_value=3),
)
)
if not transpose:
min_x_depth = filter_shape[0] + (filter_shape[0] - 1) * (fdilations[0] - 1)
min_x_height = filter_shape[1] + (filter_shape[1] - 1) * (fdilations[1] - 1)
min_x_width = filter_shape[2] + (filter_shape[2] - 1) * (fdilations[2] - 1)
else:
min_x_depth = 1
min_x_height = 1
min_x_width = 1
if transpose:
d_in = filter_shape[4]
else:
d_in = filter_shape[3]
if data_format == "NDHWC":
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=min_x_depth, max_value=100),
helpers.ints(min_value=min_x_height, max_value=100),
helpers.ints(min_value=min_x_width, max_value=100),
helpers.ints(min_value=d_in, max_value=d_in),
)
)
else:
x_shape = draw(
st.tuples(
helpers.ints(min_value=1, max_value=5),
helpers.ints(min_value=d_in, max_value=d_in),
helpers.ints(min_value=min_x_depth, max_value=100),
helpers.ints(min_value=min_x_height, max_value=100),
helpers.ints(min_value=min_x_width, max_value=100),
)
)
if data_format[-1] == "C":
x_dims = x_shape[1:-1]
else:
x_dims = x_shape[2:]
if transpose:
output_shape = _output_shape(
dim, fdilations, fstride, padding, x_dims, filter_shape
)
assume(all(s > 0 for s in output_shape))
if data_format[1] == "C":
output_shape = [x_shape[0], filter_shape[dim], *output_shape]
else:
output_shape = [x_shape[0], *output_shape, filter_shape[dim]]
if not isinstance(padding, str):
assume(
all(
max(pad) - min(pad) < min(stride, dilation)
for pad, stride, dilation in zip(padding, fstride, fdilations)
)
)
if data_format[1] == "C":
padding = [(0, 0), (0, 0), *padding]
else:
padding = [(0, 0), *padding, (0, 0)]
x = draw(
helpers.array_values(dtype=dtype[0], shape=x_shape, min_value=0, max_value=1)
)
filters = draw(
helpers.array_values(
dtype=dtype[0], shape=filter_shape, min_value=0, max_value=1
)
)
if type == "separable":
p_filter_shape = (
1,
1,
filter_shape[-1] * filter_shape[-2],
draw(helpers.ints(min_value=1, max_value=3)),
)
p_filters = draw(
helpers.array_values(
dtype=dtype[0], shape=p_filter_shape, min_value=0, max_value=1
)
)
filters = [filters, p_filters]
if type in ["depthwise", "separable"]:
stride = [1, stride, stride, 1]
if isinstance(dilations, int):
dilations = [dilations] * dim
elif not atrous and type is not None:
if transpose:
if isinstance(stride, int):
stride = [stride]
else:
if draw(st.booleans()):
stride = [1, *stride, 1]
if isinstance(dilations, int):
dilations = [dilations]
else:
if draw(st.booleans()):
dilations = [1, *dilations, 1]
else:
if dim != 3:
if isinstance(stride, int):
stride = [stride]
else:
if draw(st.booleans()):
stride = [1, *stride, 1]
if isinstance(dilations, int):
dilations = [dilations]
else:
if draw(st.booleans()):
dilations = [1, *dilations, 1]
else:
if isinstance(stride, int):
stride = [stride] * dim
stride = [1, *stride, 1]
if isinstance(dilations, int):
dilations = [dilations] * dim
dilations = [1, *dilations, 1]
if not transpose:
return dtype, x, filters, dilations, data_format, stride, padding
return dtype, x, filters, dilations, data_format, stride, padding, output_shape
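# Worked check (illustrative) of the minimum-size formula used repeatedly in
# _x_and_filters above: a kernel of size k with dilation d spans
# k + (k - 1) * (d - 1) input positions, so a VALID convolution needs an
# input at least that wide.
def _min_input_span(k: int, d: int) -> int:
    return k + (k - 1) * (d - 1)
assert _min_input_span(3, 1) == 3
assert _min_input_span(3, 2) == 5  # taps at offsets 0, 2, 4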
@st.composite
def df(draw, data_format):
data_format = draw(data_format)
return data_format
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="tensorflow.nn.atrous_conv2d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
stride_min=1,
stride_max=1,
type="2d",
atrous=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_atrous_conv2d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilations, data_format, stride, pad = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=x,
filters=filters,
rate=dilations,
padding=pad,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.atrous_conv2d_transpose",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
stride_min=1,
stride_max=1,
dilation_max=1,
type="2d",
transpose=True,
atrous=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_atrous_conv2d_transpose(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(
input_dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
output_shape,
) = x_f_d_df
_assume_tf_dilation_gt_1("tensorflow", on_device, dilations)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=x,
filters=filters,
output_shape=output_shape,
rate=dilations,
padding=pad,
)
# average_pool
@handle_frontend_test(
fn_tree="tensorflow.nn.avg_pool",
x_k_s_p_df=_average_pool_args(),
test_with_out=st.just(False),
)
def test_tensorflow_avg_pool(
*,
x_k_s_p_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(input_dtype, x, ksize, strides, padding), data_format = x_k_s_p_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
)
# test_avg_pool1d
@handle_frontend_test(
fn_tree="tensorflow.nn.avg_pool1d",
x_k_s_p_df=helpers.arrays_for_pooling(
min_dims=3, max_dims=3, min_side=1, max_side=4
),
test_with_out=st.just(False),
)
def test_tensorflow_avg_pool1d(
*,
x_k_s_p_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(input_dtype, x, ksize, strides, padding) = x_k_s_p_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.avg_pool2d",
x_k_s_p_df=helpers.arrays_for_pooling(
min_dims=4, max_dims=4, min_side=1, max_side=4
),
test_with_out=st.just(False),
)
def test_tensorflow_avg_pool2d(
*,
x_k_s_p_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, ksize, strides, padding = x_k_s_p_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.avg_pool3d",
x_k_s_p_df=helpers.arrays_for_pooling(
min_dims=5, max_dims=5, min_side=1, max_side=4
),
test_with_out=st.just(False),
)
def test_tensorflow_avg_pool3d(
*,
x_k_s_p_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, ksize, strides, padding = x_k_s_p_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.batch_normalization",
data=_batch_normalization_helper(),
eps=helpers.floats(min_value=1e-5, max_value=0.1),
)
def test_tensorflow_batch_normalization(
*,
data,
eps,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
x_dtype, x, mean, variance, offset, scale = data
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
rtol=1e-2,
atol=1e-2,
fn_tree=fn_tree,
on_device=on_device,
x=x,
mean=mean,
variance=variance,
offset=offset,
scale=scale,
variance_epsilon=eps,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.bias_add",
data=_generate_bias_data(),
test_with_out=st.just(False),
)
def test_tensorflow_bias_add(
*,
data,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
data_format, dtype, value, bias = data
helpers.test_frontend_function(
input_dtypes=dtype * 2,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=value[0],
bias=bias,
data_format=data_format,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.conv1d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
type="1d",
),
test_with_out=st.just(False),
)
def test_tensorflow_conv1d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilations, data_format, stride, pad = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filters=filters,
stride=stride,
padding=pad,
data_format=data_format,
dilations=dilations,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.conv1d_transpose",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
type="1d",
transpose=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_conv1d_transpose(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(
input_dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
output_shape,
) = x_f_d_df
_assume_tf_dilation_gt_1("tensorflow", on_device, dilations)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filters=filters,
output_shape=output_shape,
strides=stride,
padding=pad,
data_format=data_format,
dilations=dilations,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.conv2d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
type="2d",
),
)
def test_tensorflow_conv2d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filters=filters,
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.conv2d_transpose",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
type="2d",
transpose=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_conv2d_transpose(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(
input_dtype,
x,
filters,
dilation,
data_format,
stride,
padding,
output_shape,
) = x_f_d_df
assume(isinstance(padding, str) or backend_fw in ["torch", "tensorflow"])
_assume_tf_dilation_gt_1("tensorflow", on_device, dilation)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filters=filters,
output_shape=output_shape,
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.conv3d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NDHWC"]),
padding=st.sampled_from(["SAME"]),
type="3d",
),
test_with_out=st.just(False),
)
def test_tensorflow_conv3d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
_assume_tf_dilation_gt_1("tensorflow", on_device, dilation)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filters=filters,
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.conv3d_transpose",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NDHWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
type="3d",
transpose=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_conv3d_transpose(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(
input_dtype,
x,
filters,
dilation,
data_format,
stride,
padding,
output_shape,
) = x_f_d_df
_assume_tf_dilation_gt_1("tensorflow", on_device, dilation)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filters=filters,
output_shape=output_shape,
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
# convolution
@handle_frontend_test(
fn_tree="tensorflow.nn.convolution",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NWC", "NHWC", "NDHWC"]),
padding=st.sampled_from(["SAME", "VALID"]),
dilation_max=1,
type=None,
),
test_with_out=st.just(False),
)
def test_tensorflow_convolution(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filters=filters,
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
# crelu
@handle_frontend_test(
fn_tree="tensorflow.nn.crelu",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=4,
max_axes_size=3,
force_int_axis=True,
valid_axis=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_crelu(
*,
dtype_x_and_axis,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x, axis = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
axis=axis,
)
# ctc_unique_labels
@handle_frontend_test(
fn_tree="tensorflow.nn.ctc_unique_labels",
dtype_x=helpers.dtype_and_values(
available_dtypes=["int64", "int32"],
min_value=1,
max_value=100,
min_dim_size=1,
max_dim_size=10,
min_num_dims=2,
max_num_dims=2,
),
    test_with_out=st.just(False),
)
def test_tensorflow_ctc_unique_labels(
*,
dtype_x,
frontend,
fn_tree,
test_flags,
on_device,
backend_fw,
):
dtype, x = dtype_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
labels=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.nn.depthwise_conv2d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
type="depthwise",
),
test_with_out=st.just(False),
)
def test_tensorflow_depthwise_conv2d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filter=filters,
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.dropout",
dtype_x_noiseshape=_dropout_helper(),
)
def test_tensorflow_dropout(
*,
dtype_x_noiseshape,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
(x_dtype, x), noise_shape, seed, rate = dtype_x_noiseshape
if rate == 0:
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
rate=rate,
noise_shape=noise_shape,
seed=seed,
)
else:
ret = helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
rate=rate,
noise_shape=noise_shape,
seed=seed,
)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
for u in ret:
# cardinality test
assert u.shape == x[0].shape
# embedding_lookup
@handle_frontend_test(
fn_tree="tensorflow.nn.embedding_lookup",
dtypes_indices_weights=helpers.embedding_helper(),
max_norm=st.floats(min_value=0.1, max_value=5, exclude_min=True),
)
def test_tensorflow_embedding_lookup(
*,
dtypes_indices_weights,
max_norm,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
dtypes, indices, weight, _ = dtypes_indices_weights
dtypes.reverse()
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
params=weight,
ids=indices,
max_norm=max_norm,
atol=1e-4,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.gelu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
max_value=1e04,
),
approximate=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_gelu(
*,
dtype_and_x,
approximate,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
approximate=approximate,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.leaky_relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
large_abs_safety_factor=25,
small_abs_safety_factor=25,
safety_factor_scale="log",
),
test_with_out=st.just(False),
alpha=helpers.floats(
min_value=0,
max_value=1,
large_abs_safety_factor=25,
small_abs_safety_factor=25,
safety_factor_scale="log",
),
)
def test_tensorflow_leaky_relu(
*,
dtype_and_x,
alpha,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
    helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
alpha=alpha,
)
# local_response_normalization
@handle_frontend_test(
fn_tree="tensorflow.nn.local_response_normalization",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=4,
max_num_dims=4,
min_dim_size=1,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
depth_radius=st.integers(min_value=1, max_value=5),
bias=st.floats(min_value=0.1, max_value=1.5),
alpha=st.floats(min_value=0.1, max_value=1.5),
beta=st.floats(min_value=0.1, max_value=1.5),
test_with_out=st.just(False),
)
def test_tensorflow_local_response_normalization(
*,
dtype_and_x,
depth_radius,
bias,
alpha,
beta,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
input=x[0],
depth_radius=depth_radius,
bias=bias,
alpha=alpha,
beta=beta,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.log_poisson_loss",
dtype_target_log_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=0,
max_value=1,
min_num_dims=1,
max_num_dims=3,
shared_dtype=True,
),
compute_full_loss=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_log_poisson_loss(
*,
dtype_target_log_inputs,
compute_full_loss,
test_flags,
frontend,
fn_tree,
on_device,
backend_fw,
):
input_dtype, input_values = dtype_target_log_inputs
targets, log_input = input_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
targets=targets,
log_input=log_input,
compute_full_loss=compute_full_loss,
atol=1e-2,
)
# max_pool1d
@handle_frontend_test(
fn_tree="tensorflow.nn.max_pool1d",
data_format=df(data_format=st.sampled_from(["NWC"])),
x_k_s_p=helpers.arrays_for_pooling(min_dims=3, max_dims=3, min_side=1, max_side=4),
test_with_out=st.just(False),
)
def test_tensorflow_max_pool1d(
*,
x_k_s_p,
data_format,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, ksize, strides, padding = x_k_s_p
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
)
# max_pool2d
@handle_frontend_test(
fn_tree="tensorflow.nn.max_pool2d",
data_format=df(data_format=st.sampled_from(["NHWC"])),
x_k_s_p=helpers.arrays_for_pooling(min_dims=4, max_dims=4, min_side=1, max_side=4),
test_with_out=st.just(False),
)
def test_tensorflow_max_pool2d(
*,
x_k_s_p,
data_format,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, ksize, strides, padding = x_k_s_p
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
)
# max_pool3d
@handle_frontend_test(
fn_tree="tensorflow.nn.max_pool3d",
data_format=st.just("NDHWC"), # Pooling3DOp only supports NDHWC on device type CPU
x_k_s_p=helpers.arrays_for_pooling(min_dims=5, max_dims=5, min_side=1, max_side=4),
test_with_out=st.just(False),
)
def test_tensorflow_max_pool3d(
*,
x_k_s_p,
data_format,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, ksize, strides, padding = x_k_s_p
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
)
# moments
@handle_frontend_test(
fn_tree="tensorflow.nn.moments",
dtype_x_axis=_statistical_dtype_values(function="mean"),
keepdims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_moments(
*,
dtype_x_axis,
keepdims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
atol=1e-1,
x=x[0],
axes=axis,
keepdims=keepdims,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.normalize_moments",
data=_normalize_moments_helper(),
)
def test_tensorflow_normalize_moments(
*,
data,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
counts_dtype, counts, mean, variance, shift = data
helpers.test_frontend_function(
input_dtypes=counts_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
atol=1e-1,
counts=counts,
mean_ss=mean,
variance_ss=variance,
shift=shift,
)
# pool
@handle_frontend_test(
fn_tree="tensorflow.nn.pool",
x_k_s_p_df=_pool_args(),
test_with_out=st.just(False),
)
def test_tensorflow_pool(
*,
x_k_s_p_df,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
(
(input_dtype, x, ksize, strides, padding, dilation),
data_format,
pooling_type,
num_dims,
) = x_k_s_p_df
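    # the pooling helper yields a single stride value; repeat it once per
    # spatial dimension (num_dims also counts the batch and channel dims)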
if num_dims == 3:
strides = (strides[0],)
elif num_dims == 4:
strides = (strides[0], strides[0])
elif num_dims == 5:
strides = (strides[0], strides[0], strides[0])
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
window_shape=ksize,
pooling_type=pooling_type,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilation,
)
# relu
@handle_frontend_test(
fn_tree="tensorflow.nn.relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_value=-20,
max_value=20,
),
test_with_out=st.just(False),
)
def test_tensorflow_relu(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
# relu6
@handle_frontend_test(
fn_tree="tensorflow.nn.relu6",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_value=-20,
max_value=20,
),
test_with_out=st.just(False),
)
def test_tensorflow_relu6(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.nn.separable_conv2d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
type="separable",
),
test_with_out=st.just(False),
)
def test_tensorflow_separable_conv2d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
depthwise_filter=filters[0],
pointwise_filter=filters[1],
strides=stride,
padding=padding,
data_format=data_format,
dilations=dilation,
)
# sigmoid_cross_entropy_with_logits
@handle_frontend_test(
fn_tree="tensorflow.nn.sigmoid_cross_entropy_with_logits",
dtype_labels_logits=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=0,
max_value=1,
min_num_dims=1,
max_num_dims=2,
min_dim_size=1,
max_dim_size=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_sigmoid_cross_entropy_with_logits(
*,
dtype_labels_logits,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, input_values = dtype_labels_logits
labels, logits = input_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
labels=labels,
logits=logits,
)
# silu
@handle_frontend_test(
fn_tree="tensorflow.nn.silu",
dtype_features=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=5,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
beta=helpers.floats(
min_value=0,
max_value=3,
),
test_with_out=st.just(False),
)
def test_tensorflow_silu(
*,
dtype_features,
beta,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, features = dtype_features
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
features=features[0],
beta=beta,
)
# softmax
@handle_frontend_test(
fn_tree="tensorflow.nn.softmax",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
force_int_axis=True,
valid_axis=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_softmax(
*,
dtype_x_and_axis,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x, axis = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
logits=x[0],
axis=axis,
)
@handle_frontend_test(
fn_tree="tensorflow.nn.sufficient_statistics",
dtypes_x_axes_shift=_axes_value(),
sh=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float"), shape=()),
keepdims=st.booleans(),
)
def test_tensorflow_sufficient_statistics(
*,
dtypes_x_axes_shift,
sh,
keepdims,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtypes, x, a = dtypes_x_axes_shift
    helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axes=a,
shift=sh[1][0],
keepdims=keepdims,
name=None,
)
# weighted_cross_entropy_with_logits
@handle_frontend_test(
fn_tree="tensorflow.nn.weighted_cross_entropy_with_logits",
dtype_labels_logits=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=0,
max_value=1,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
shared_dtype=True,
),
pos_weight=st.one_of(
helpers.floats(
min_value=0,
max_value=3,
)
),
test_with_out=st.just(False),
)
def test_tensorflow_weighted_cross_entropy_with_logits(
*,
dtype_labels_logits,
pos_weight,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, input_values = dtype_labels_logits
labels, logits = input_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
labels=labels,
logits=logits,
pos_weight=pos_weight,
)
# weighted_moments
@handle_frontend_test(
fn_tree="tensorflow.nn.weighted_moments",
dtype_and_x_and_axis=_statistical_dtype_values(function="mean"),
dtype_and_fw=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_value=0.00001,
),
keepdims=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_weighted_moments(
*,
dtype_and_x_and_axis,
dtype_and_fw,
keepdims,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x, axis = dtype_and_x_and_axis
fw_dtype, fw = dtype_and_fw
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
atol=1e-1,
x=x[0],
axes=axis,
frequency_weights=fw[0],
keepdims=keepdims,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nn.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nn.py",
"repo_id": "ivy",
"token_count": 26962
} | 58 |
# global
from hypothesis import given, strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy.functional.frontends.torch.func_wrapper import (
inputs_to_ivy_arrays,
outputs_to_frontend_arrays,
to_ivy_arrays_and_back,
numpy_to_torch_style_args,
)
from ivy.functional.frontends.torch.tensor import Tensor
import ivy.functional.frontends.torch as torch_frontend
# --- Helpers --- #
# --------------- #
def _fn(*args, dtype=None, check_default=False, inplace=False):
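    # when called with no array arguments and no explicit dtype, verify that
    # ivy's default dtypes still match the torch frontend's defaults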
if (
check_default
and all(not (ivy.is_array(i) or hasattr(i, "ivy_array")) for i in args)
and not ivy.exists(dtype)
):
ivy.utils.assertions.check_equal(
ivy.default_float_dtype(),
torch_frontend.get_default_dtype(),
as_array=False,
)
ivy.utils.assertions.check_equal(
ivy.default_int_dtype(), "int64", as_array=False
)
return args[0]
# --- Main --- #
# ------------ #
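# stub wrapped with numpy_to_torch_style_args so the numpy->torch keyword
# aliasing (axis->dim, keepdims->keepdim, x->input, x2->other) can be
# exercised directly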
@numpy_to_torch_style_args
def mocked_func(dim=None, keepdim=None, input=None, other=None):
return dim, keepdim, input, other
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0])
)
def test_torch_inputs_to_ivy_arrays(dtype_and_x, backend_fw):
x_dtype, x = dtype_and_x
ivy.set_backend(backend=backend_fw)
# check for ivy array
input_ivy = ivy.array(x[0], dtype=x_dtype[0])
output = inputs_to_ivy_arrays(_fn)(input_ivy)
assert isinstance(output, ivy.Array)
assert input_ivy.dtype == output.dtype
assert ivy.all(input_ivy == output)
# check for native array
input_native = ivy.native_array(input_ivy)
output = inputs_to_ivy_arrays(_fn)(input_native)
assert isinstance(output, ivy.Array)
assert ivy.as_ivy_dtype(input_native.dtype) == str(output.dtype)
assert ivy.all(input_native == output.data)
# check for frontend array
input_frontend = Tensor(x[0])
input_frontend.ivy_array = input_ivy
output = inputs_to_ivy_arrays(_fn)(input_frontend)
assert isinstance(output, ivy.Array)
assert str(input_frontend.dtype) == str(output.dtype)
assert ivy.all(input_frontend.ivy_array == output)
ivy.previous_backend()
@given(
dim=st.integers(),
keepdim=st.booleans(),
input=st.lists(st.integers()),
other=st.integers(),
)
def test_torch_numpy_to_torch_style_args(dim, keepdim, input, other):
# PyTorch-style keyword arguments
assert (dim, keepdim, input, other) == mocked_func(
dim=dim, keepdim=keepdim, input=input, other=other
)
# NumPy-style keyword arguments
assert (dim, keepdim, input, other) == mocked_func(
axis=dim, keepdims=keepdim, x=input, x2=other
)
# Mixed-style keyword arguments
assert (dim, keepdim, input, other) == mocked_func(
axis=dim, keepdim=keepdim, input=input, x2=other
)
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0]),
dtype=helpers.get_dtypes("valid", none=True, full=False, prune_function=False),
generate_type=st.sampled_from(["frontend", "ivy", "native"]),
inplace=st.booleans(),
)
def test_torch_outputs_to_frontend_arrays(
dtype_and_x,
dtype,
generate_type,
inplace,
backend_fw,
):
x_dtype, x = dtype_and_x
ivy.set_backend(backend_fw)
x = ivy.array(x[0], dtype=x_dtype[0])
if generate_type == "frontend":
x = Tensor(x)
elif generate_type == "native":
x = x.data
if not len(x.shape):
scalar_x = ivy.to_scalar(x.ivy_array if isinstance(x, Tensor) else x)
outputs_to_frontend_arrays(_fn)(
scalar_x, scalar_x, check_default=True, dtype=dtype
)
outputs_to_frontend_arrays(_fn)(scalar_x, x, check_default=True, dtype=dtype)
output = outputs_to_frontend_arrays(_fn)(
x, check_default=True, dtype=dtype, inplace=inplace
)
assert isinstance(output, Tensor)
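    # an in-place call must hand back the very same underlying object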
if inplace:
if generate_type == "frontend":
assert x is output
elif generate_type == "native":
assert x is output.ivy_array.data
else:
assert x is output.ivy_array
else:
assert ivy.as_ivy_dtype(x.dtype) == ivy.as_ivy_dtype(output.dtype)
if generate_type == "frontend":
assert ivy.all(x.ivy_array == output.ivy_array)
elif generate_type == "native":
assert ivy.all(x == output.ivy_array.data)
else:
assert ivy.all(x == output.ivy_array)
assert ivy.default_float_dtype_stack == ivy.default_int_dtype_stack == []
ivy.previous_backend()
@given(
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0]),
dtype=helpers.get_dtypes("valid", none=True, full=False, prune_function=False),
)
def test_torch_to_ivy_arrays_and_back(dtype_and_x, dtype, backend_fw):
x_dtype, x = dtype_and_x
ivy.set_backend(backend_fw)
# check for ivy array
input_ivy = ivy.array(x[0], dtype=x_dtype[0])
if not len(input_ivy.shape):
scalar_input_ivy = ivy.to_scalar(input_ivy)
to_ivy_arrays_and_back(_fn)(
scalar_input_ivy, scalar_input_ivy, check_default=True, dtype=dtype
)
to_ivy_arrays_and_back(_fn)(
scalar_input_ivy, input_ivy, check_default=True, dtype=dtype
)
output = to_ivy_arrays_and_back(_fn)(input_ivy, check_default=True, dtype=dtype)
assert isinstance(output, Tensor)
assert str(input_ivy.dtype) == str(output.dtype)
assert ivy.all(input_ivy == output.ivy_array)
# check for native array
input_native = ivy.native_array(input_ivy)
if not len(input_native.shape):
scalar_input_native = ivy.to_scalar(input_native)
to_ivy_arrays_and_back(_fn)(
scalar_input_native, scalar_input_native, check_default=True, dtype=dtype
)
to_ivy_arrays_and_back(_fn)(
scalar_input_native, input_native, check_default=True, dtype=dtype
)
output = to_ivy_arrays_and_back(_fn)(input_native, check_default=True, dtype=dtype)
assert isinstance(output, Tensor)
assert ivy.as_ivy_dtype(input_native.dtype) == str(output.dtype)
assert ivy.all(input_native == output.ivy_array.data)
# check for frontend array
input_frontend = Tensor(x[0])
input_frontend.ivy_array = input_ivy
if not len(input_frontend.shape):
scalar_input_front = inputs_to_ivy_arrays(ivy.to_scalar)(input_frontend)
to_ivy_arrays_and_back(_fn)(
scalar_input_front, scalar_input_front, check_default=True, dtype=dtype
)
to_ivy_arrays_and_back(_fn)(
scalar_input_front, input_frontend, check_default=True, dtype=dtype
)
output = to_ivy_arrays_and_back(_fn)(
input_frontend, check_default=True, dtype=dtype
)
assert isinstance(output, Tensor)
assert input_frontend.dtype == output.dtype
assert ivy.all(input_frontend.ivy_array == output.ivy_array)
assert ivy.default_float_dtype_stack == ivy.default_int_dtype_stack == []
ivy.previous_backend()
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_func_wrapper.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_func_wrapper.py",
"repo_id": "ivy",
"token_count": 3332
} | 59 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
inf = float("inf")
# --- Helpers --- #
# --------------- #
@st.composite
def get_dtype_num_classes(draw):
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=1,
min_value=1,
max_value=10,
max_num_dims=0,
)
)
input_dtype, x = dtype_and_x
    # one_hot requires num_classes to be strictly greater than the largest label
    num_classes = draw(st.integers(min_value=max(x) + 1, max_value=10))
return (num_classes, dtype_and_x)
# embedding
@handle_frontend_test(
fn_tree="torch.nn.functional.embedding",
dtypes_indices_weights=helpers.embedding_helper(),
max_norm=st.floats(min_value=0.1, max_value=5, exclude_min=True),
p=st.one_of(
st.sampled_from([inf, -inf]),
st.integers(min_value=1, max_value=2),
st.floats(min_value=1.0, max_value=2.0),
),
test_with_out=st.just(False),
)
def test_torch_embedding(
*,
dtypes_indices_weights,
max_norm,
p,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, indices, weight, padding_idx = dtypes_indices_weights
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=indices,
weight=weight,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=p,
)
# one_hot
@handle_frontend_test(
fn_tree="torch.nn.functional.one_hot",
num_classes_dtype_x_axis=get_dtype_num_classes(),
)
def test_torch_one_hot(
*,
num_classes_dtype_x_axis,
frontend,
fn_tree,
test_flags,
backend_fw,
on_device,
):
num_classes, values = num_classes_dtype_x_axis
input_dtype, x = values
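    # account for the extra num_classes argument when splitting positional args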
test_flags.num_positional_args += 1
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
tensor=x[0],
num_classes=num_classes,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_sparse_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_sparse_functions.py",
"repo_id": "ivy",
"token_count": 1134
} | 60 |
"""Collection of tests for statistical functions."""
# global
import numpy as np
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# --- Helpers --- #
# --------------- #
@st.composite
def _get_castable_dtype(draw, min_value=None, max_value=None):
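    # draw an array plus a second dtype it can safely be cast to, so reduction
    # tests can request an explicit output dtype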
available_dtypes = helpers.get_dtypes("valid")
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=4, max_dim_size=6))
dtype, values = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
num_arrays=1,
large_abs_safety_factor=6,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
min_value=min_value,
max_value=max_value,
)
)
axis = draw(helpers.get_axis(shape=shape, force_int=True))
dtype1, values, dtype2 = draw(
helpers.get_castable_dtype(draw(available_dtypes), dtype[0], values[0])
)
return dtype1, [values], axis, dtype2
@st.composite
def _statistical_dtype_values(draw, *, function, min_value=None, max_value=None):
large_abs_safety_factor = 2
small_abs_safety_factor = 2
if any(ele in function for ele in ["mean", "std", "var"]):
large_abs_safety_factor = 24
small_abs_safety_factor = 24
dtype, values, axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale="log",
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
valid_axis=True,
allow_neg_axes=False,
min_axes_size=1,
min_value=min_value,
max_value=max_value,
allow_nan=True if "nan" in function else False,
)
)
shape = values[0].shape
size = values[0].size
max_correction = np.min(shape)
if "complex" in dtype[0]:
        # TODO: fall back to float32 for now, since complex dtypes are not
        # supported by tensorflow (the ground truth backend)
dtype = ["float32"]
if any(ele in function for ele in ["std", "var"]):
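        # the degrees-of-freedom correction must stay strictly below the number
        # of elements along the reduced axis (or the smallest dimension when
        # reducing over several axes)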
if size == 1:
correction = 0
elif isinstance(axis, int):
correction = draw(
helpers.ints(min_value=0, max_value=shape[axis] - 1)
| helpers.floats(min_value=0, max_value=shape[axis] - 1)
)
return dtype, values, axis, correction
else:
correction = draw(
helpers.ints(min_value=0, max_value=max_correction - 1)
| helpers.floats(min_value=0, max_value=max_correction - 1)
)
return dtype, values, axis, correction
if isinstance(axis, tuple):
axis = axis[0]
where_shape = draw(
helpers.mutually_broadcastable_shapes(
num_shapes=1, base_shape=shape, min_dims=0, max_dims=axis
)
)
dtype3, where = draw(
helpers.dtype_and_values(available_dtypes=["bool"], shape=where_shape[0])
)
return dtype, values, axis, dtype3, where
# --- Main --- #
# ------------ #
# cumprod
@handle_test(
fn_tree="functional.ivy.cumprod",
dtype_x_axis_castable=_get_castable_dtype(),
exclusive=st.booleans(),
reverse=st.booleans(),
test_gradients=st.just(False),
)
def test_cumprod(
*,
dtype_x_axis_castable,
exclusive,
reverse,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, axis, castable_dtype = dtype_x_axis_castable
# ToDo: set as_variable_flags as the parameter generated by test_cumprod once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if "torch" in backend_fw:
assume(not test_flags.as_variable[0])
assume(not test_flags.test_gradients)
# gradient tests have been disabled for cumprod as the gradients computed by the
# backends are inconsistent with tensorflow returning a zero gradient when the
# product is zero (discrete optimization), and torch and jax returning a non-zero
# gradient based on the value used to compute the product even if it's zero
# ToDo: Revisit this later
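    # e.g. for x = [2., 0., 3.], the gradient of prod(x) w.r.t. the zero entry
    # is 2 * 3 = 6 under torch/jax, while tensorflow reports 0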
    if np.min(np.abs(x[0])) < 1e-4:
assume(not test_flags.test_gradients)
helpers.test_function(
input_dtypes=[input_dtype],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
dtype=castable_dtype,
rtol_=1e-1,
atol_=1e-1,
)
@handle_test(
fn_tree="functional.ivy.cumsum",
dtype_x_axis_castable=_get_castable_dtype(),
exclusive=st.booleans(),
reverse=st.booleans(),
test_gradients=st.just(False),
)
def test_cumsum(
*,
dtype_x_axis_castable,
exclusive,
reverse,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, axis, castable_dtype = dtype_x_axis_castable
assume("bool" not in input_dtype)
# ToDo: set as_variable_flags as the parameter generated by test_cumsum once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if "torch" in backend_fw:
assume(not test_flags.as_variable[0])
assume(not test_flags.test_gradients)
helpers.test_function(
input_dtypes=[input_dtype],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
dtype=castable_dtype,
rtol_=1e-1,
atol_=1e-1,
)
# TODO: add more general tests and fix get instance method testing passing
# einsum
@handle_test(
fn_tree="functional.ivy.einsum",
eq_n_op_n_shp=helpers.einsum_helper(),
test_instance_method=st.just(False),
dtype=helpers.get_dtypes("numeric", full=False),
)
def test_einsum(
*,
eq_n_op_n_shp,
dtype,
test_flags,
backend_fw,
fn_name,
on_device,
):
eq, operands, dtypes = eq_n_op_n_shp
kw = {}
for i, x_ in enumerate(operands):
dtype = dtypes[i][0]
kw[f"x{i}"] = np.array(x_).astype(dtype)
# len(operands) + 1 because of the equation
test_flags.num_positional_args = len(operands) + 1
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
equation=eq,
**kw,
rtol_=1e-2,
atol_=1e-2,
)
# max
@handle_test(
fn_tree="functional.ivy.max",
dtype_and_x=_statistical_dtype_values(function="max"),
keep_dims=st.booleans(),
)
def test_max(*, dtype_and_x, keep_dims, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, axis, *_ = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
keepdims=keep_dims,
)
# mean
@handle_test(
fn_tree="functional.ivy.mean",
dtype_and_x=_statistical_dtype_values(function="mean"),
keep_dims=st.booleans(),
)
def test_mean(*, dtype_and_x, keep_dims, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, axis, dtype3, where = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
tolerance_dict={"bfloat16": 1e-1},
x=x[0],
axis=axis,
keepdims=keep_dims,
)
# min
@handle_test(
fn_tree="functional.ivy.min",
dtype_and_x=_statistical_dtype_values(function="min"),
keep_dims=st.booleans(),
test_gradients=st.just(False),
initial=st.integers(min_value=-5, max_value=5),
)
def test_min(
*, dtype_and_x, keep_dims, initial, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x, axis, dtype3, where = dtype_and_x
helpers.test_function(
input_dtypes=[input_dtype[0], dtype3[0]],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
keepdims=keep_dims,
initial=initial,
where=where[0],
)
# prod
@handle_test(
fn_tree="functional.ivy.prod",
dtype_x_axis_castable=_get_castable_dtype(),
keep_dims=st.booleans(),
)
def test_prod(
*, dtype_x_axis_castable, keep_dims, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x, axis, castable_dtype = dtype_x_axis_castable
# ToDo: set as_variable_flags as the parameter generated by test_prod once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if "torch" in backend_fw:
assume(not test_flags.as_variable[0])
assume(not test_flags.test_gradients)
helpers.test_function(
input_dtypes=[input_dtype],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
rtol_=1e-1,
atol_=1e-1,
on_device=on_device,
x=x[0],
axis=axis,
keepdims=keep_dims,
dtype=castable_dtype,
)
# std
@handle_test(
fn_tree="functional.ivy.std",
dtype_and_x=_statistical_dtype_values(function="std"),
keep_dims=st.booleans(),
)
def test_std(*, dtype_and_x, keep_dims, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
x=x[0],
axis=axis,
correction=correction,
keepdims=keep_dims,
)
# sum
@handle_test(
fn_tree="functional.ivy.sum",
dtype_x_axis_castable=_get_castable_dtype(),
keep_dims=st.booleans(),
test_gradients=st.just(False),
)
def test_sum(
*, dtype_x_axis_castable, keep_dims, test_flags, backend_fw, fn_name, on_device
):
input_dtype, x, axis, castable_dtype = dtype_x_axis_castable
# ToDo: set as_variable_flags as the parameter generated by test_sum once
# this issue is marked as completed https://github.com/pytorch/pytorch/issues/75733
if "torch" in backend_fw:
assume(not test_flags.as_variable[0])
assume(not test_flags.test_gradients)
if "jax" in backend_fw and castable_dtype in ["complex64", "complex128"]:
assume(not test_flags.test_gradients)
helpers.test_function(
input_dtypes=[input_dtype],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-2,
x=x[0],
axis=axis,
keepdims=keep_dims,
dtype=castable_dtype,
)
# var
@handle_test(
fn_tree="functional.ivy.var",
dtype_and_x=_statistical_dtype_values(function="var"),
keep_dims=st.booleans(),
)
def test_var(*, dtype_and_x, keep_dims, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
x=x[0],
axis=axis,
correction=correction,
keepdims=keep_dims,
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_statistical.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_statistical.py",
"repo_id": "ivy",
"token_count": 5716
} | 61 |
# global
from hypothesis import assume, strategies as st
import numpy as np
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_method, handle_test, BackendHandler
from ivy_tests.test_ivy.test_functional.test_core.test_elementwise import (
not_too_close_to_zero,
pow_helper,
)
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_first_matrix_and_dtype,
_get_second_matrix_and_dtype,
)
CLASS_TREE = "ivy.array"
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__abs__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
large_abs_safety_factor=1.5,
small_abs_safety_factor=1.5,
safety_factor_scale="log",
),
)
def test_array__abs__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__add__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__add__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__and__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__and__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__bool__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
max_num_dims=0,
min_value=0,
max_value=1,
),
)
def test_array__bool__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__complex__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
),
method_container_flags=st.just([False]),
)
def test_array__complex__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__deepcopy__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
),
)
def test_array__deepcopy__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={"memodict": {}},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__divmod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__divmod__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__eq__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__eq__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__float__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
),
)
def test_array__float__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__floordiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=3.0,
small_abs_safety_factor=3.0,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__floordiv__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ge__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__ge__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__getitem__",
ground_truth_backend="numpy",
dtypes_x_query=helpers.dtype_array_query(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_array__getitem__(
dtypes_x_query,
init_flags,
method_flags,
method_name,
class_name,
backend_fw,
ground_truth_backend,
on_device,
):
dtypes, x, query = dtypes_x_query
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x},
init_input_dtypes=[dtypes[0]],
method_input_dtypes=[*dtypes[1:]],
method_all_as_kwargs_np={"query": query},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__gt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__gt__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__iadd__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__iadd__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__iand__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__iand__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ifloordiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=3.0,
small_abs_safety_factor=3.0,
safety_factor_scale="log",
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__ifloordiv__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ilshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
method_container_flags=st.just([False]),
)
def test_array__ilshift__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
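    # shifting by >= the dtype's bit width is undefined, so clamp the shift
    # amounts to the valid range [0, bits - 1]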
x[1] = np.asarray(np.clip(x[1], 0, np.iinfo(dtype[1]).bits - 1), dtype=dtype[1])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[dtype[0]],
method_input_dtypes=[dtype[1]],
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__imatmul__",
x1=_get_first_matrix_and_dtype(),
x2=_get_second_matrix_and_dtype(),
method_container_flags=st.just([False]),
)
def test_array__imatmul__(
x1,
x2,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype1, x1 = x1
dtype2, x2 = x2
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x1},
init_input_dtypes=dtype1,
method_input_dtypes=dtype2,
method_all_as_kwargs_np={"other": x2},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__imod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__imod__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__imul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__imul__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__int__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
min_value=-1e15,
max_value=1e15,
),
method_container_flags=st.just([False]),
)
def test_array__int__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=[],
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__invert__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
),
)
def test_array__invert__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ior__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__ior__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ipow__",
dtype_and_x=pow_helper(),
method_container_flags=st.just([False]),
)
def test_array__ipow__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
# bfloat16 is not supported by numpy
assume("bfloat16" not in input_dtype)
    # Make sure x2 isn't a float when x1 is an integer
    assume(
        not (ivy.is_int_dtype(input_dtype[0]) and ivy.is_float_dtype(input_dtype[1]))
    )
    # Make sure x2 is non-negative when both are integers
if ivy.is_int_dtype(input_dtype[1]) and ivy.is_int_dtype(input_dtype[0]):
x[1] = np.abs(x[1])
x[0] = not_too_close_to_zero(x[0])
x[1] = not_too_close_to_zero(x[1])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[input_dtype[0]],
method_input_dtypes=[input_dtype[1]],
method_all_as_kwargs_np={"power": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__irshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
method_container_flags=st.just([False]),
)
def test_array__irshift__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
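    # clamp the shift amounts to [0, bits - 1]; larger shifts are undefined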
x[1] = np.asarray(np.clip(x[1], 0, np.iinfo(dtype[1]).bits - 1), dtype=dtype[1])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[dtype[0]],
method_input_dtypes=[dtype[1]],
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__isub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__isub__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__iter__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_dim_size=2,
min_num_dims=1,
),
)
def test_array__iter__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__itruediv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__itruediv__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ixor__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
method_container_flags=st.just([False]),
)
def test_array__ixor__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__le__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__le__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__len__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_array__len__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__lshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
)
def test_array__lshift__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
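    # clamp the shift amount to the dtype's bit width and cap the shifted
    # value so `x[0] << x[1]` stays representable without overflow or sign issues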
max_bits = np.iinfo(dtype[0]).bits
max_shift = max_bits - 1
x[1] = np.asarray(np.clip(x[1], 0, max_shift), dtype=dtype[1])
max_value_before_shift = 2 ** (max_bits - x[1]) - 1
overflow_threshold = 2 ** (max_bits - 1)
x[0] = np.asarray(np.clip(x[0], None, max_value_before_shift), dtype=dtype[0])
if np.any(x[0] > overflow_threshold):
x[0] = np.asarray(np.clip(x[0], None, overflow_threshold), dtype=dtype[0])
if np.any(x[0] < 0):
x[0] = np.asarray(np.abs(x[0]), dtype=dtype[0])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[dtype[0]],
method_input_dtypes=[dtype[1]],
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
rtol_=1e-5,
atol_=1e-5,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__lt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__lt__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__matmul__",
x=_get_first_matrix_and_dtype(),
y=_get_second_matrix_and_dtype(),
)
def test_array__matmul__(
x,
y,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype1, x = x
input_dtype2, y = y
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x},
init_input_dtypes=input_dtype1,
method_input_dtypes=input_dtype2,
method_all_as_kwargs_np={"other": y},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__mod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__mod__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__mul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__mul__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ne__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__ne__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__neg__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_array__neg__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__or__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__or__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__pos__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_array__pos__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__pow__",
dtype_and_x=pow_helper(),
)
def test_array__pow__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
# bfloat16 is not supported by numpy
assume("bfloat16" not in input_dtype)
# Make sure x2 isn't a float when x1 is integer
assume(
        not (ivy.is_int_dtype(input_dtype[0]) and ivy.is_float_dtype(input_dtype[1]))
)
    # Make sure x2 is non-negative when both are integers
if ivy.is_int_dtype(input_dtype[1]) and ivy.is_int_dtype(input_dtype[0]):
x[1] = np.abs(x[1])
x[0] = not_too_close_to_zero(x[0])
x[1] = not_too_close_to_zero(x[1])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[input_dtype[0]],
method_input_dtypes=[input_dtype[1]],
method_all_as_kwargs_np={"power": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__radd__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__radd__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rand__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__rand__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rdivmod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__rdivmod__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rfloordiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=3.0,
small_abs_safety_factor=3.0,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__rfloordiv__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rlshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
)
def test_array__rlshift__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
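    # for __rlshift__ the array instance is the shift amount, so clamp it
    # to the valid shift range for its dtype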
    x[0] = np.asarray(np.clip(x[0], 0, np.iinfo(dtype[0]).bits - 1), dtype=dtype[0])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[dtype[0]],
method_input_dtypes=[dtype[1]],
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rmatmul__",
x1=_get_first_matrix_and_dtype(),
x2=_get_second_matrix_and_dtype(),
)
def test_array__rmatmul__(
x1,
x2,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype1, x1 = x1
dtype2, x2 = x2
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x2},
init_input_dtypes=dtype1,
method_input_dtypes=dtype2,
method_all_as_kwargs_np={"other": x1},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rmod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__rmod__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rmul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__rmul__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__ror__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__ror__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rpow__",
dtype_and_x=pow_helper(),
)
def test_array__rpow__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
# bfloat16 is not supported by numpy
assume("bfloat16" not in input_dtype)
# Make sure x2 isn't a float when x1 is integer
assume(
        not (ivy.is_int_dtype(input_dtype[0]) and ivy.is_float_dtype(input_dtype[1]))
)
    # Make sure x2 is non-negative when both are integers
if ivy.is_int_dtype(input_dtype[1]) and ivy.is_int_dtype(input_dtype[0]):
x[1] = np.abs(x[1])
x[0] = not_too_close_to_zero(x[0])
x[1] = not_too_close_to_zero(x[1])
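    # __rpow__ computes power ** self, so the instance is built from x[1]
    # and x[0] is passed in as the "power" argument below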
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[1]},
init_input_dtypes=[input_dtype[1]],
method_input_dtypes=[input_dtype[0]],
method_all_as_kwargs_np={"power": x[0]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rrshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
array_api_dtypes=True,
),
)
def test_array__rrshift__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
x[0] = np.asarray(np.clip(x[0], 0, np.iinfo(dtype[0]).bits - 1), dtype=dtype[0])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[dtype[0]],
method_input_dtypes=[dtype[1]],
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
min_value=0,
shared_dtype=True,
),
)
def test_array__rshift__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
x[1] = np.asarray(np.clip(x[1], 0, np.iinfo(dtype[1]).bits - 1), dtype=dtype[1])
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=[dtype[0]],
method_input_dtypes=[dtype[1]],
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rsub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__rsub__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rtruediv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__rtruediv__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__rxor__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__rxor__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__setitem__",
ground_truth_backend="numpy",
dtypes_x_query_val=helpers.dtype_array_query_val(
available_dtypes=helpers.get_dtypes("valid"),
),
# ToDo: fix container method
method_container_flags=st.just([False]),
)
def test_array__setitem__(
dtypes_x_query_val,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtypes, x, query, val = dtypes_x_query_val
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x},
init_input_dtypes=[dtypes[0]],
method_input_dtypes=[*dtypes[1:]],
method_all_as_kwargs_np={"query": query, "val": val},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__sub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__sub__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__truediv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_array__truediv__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
@handle_method(
init_tree=CLASS_TREE,
method_tree="Array.__xor__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")),
num_arrays=2,
shared_dtype=True,
),
)
def test_array__xor__(
dtype_and_x,
method_name,
class_name,
ground_truth_backend,
backend_fw,
init_flags,
method_flags,
on_device,
):
dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
on_device=on_device,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={"data": x[0]},
init_input_dtypes=dtype,
method_input_dtypes=dtype,
method_all_as_kwargs_np={"other": x[1]},
class_name=class_name,
method_name=method_name,
)
def test_array_function():
HANDLED_FUNCTIONS = {}
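    # maps ivy functions to the custom implementations registered below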
class MyArray:
def __init__(self, data=None):
self.data = data
def __ivy_array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
if not all(
issubclass(t, (MyArray, ivy.Array, ivy.NativeArray)) for t in types
):
return NotImplemented
return HANDLED_FUNCTIONS[func](*args, **kwargs)
def implements(ivy_function):
"""Register an __ivy_array_function__ implementation for MyArray
objects."""
def decorator(func):
HANDLED_FUNCTIONS[ivy_function] = func
return func
return decorator
@implements(ivy.abs)
def _(my_array, ivy_array):
my_array.data = abs(my_array.data)
ivy_array = ivy.abs(ivy_array)
return (my_array, ivy_array)
x = MyArray(-3)
y = ivy.array([1, -1])
xy = _(x, ivy_array=y)
x1 = xy[0]
y1 = xy[1]
assert x1.data == 3
assert all(y1 == ivy.array([1, 1]))
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=2,
),
)
def test_array_property_T(
dtype_x,
backend_fw,
test_flags,
):
_, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ret = helpers.flatten_and_to_np(ret=x.T, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ivy_backend.matrix_transpose(data), backend=backend_fw
)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
# TODO: avoid using dummy fn_tree in property tests
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_array_property_data(
dtype_x,
backend_fw,
test_flags,
):
_, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ret = helpers.flatten_and_to_np(ret=x.data, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(ret=data, backend=backend_fw)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_array_property_device(
dtype_x,
backend_fw,
):
_, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ivy_backend.utils.assertions.check_equal(
x.device, ivy_backend.dev(data), as_array=False
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_array_property_dtype(
dtype_x,
backend_fw,
):
_, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ivy_backend.utils.assertions.check_equal(
x.dtype, ivy_backend.dtype(data), as_array=False
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("complex")),
)
def test_array_property_imag(
dtype_x,
backend_fw,
test_flags,
):
_, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ret = helpers.flatten_and_to_np(ret=x.imag, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(ret=ivy_backend.imag(x), backend=backend_fw)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_array_property_itemsize(
dtype_x,
backend_fw,
):
dtype, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ivy_backend.utils.assertions.check_equal(
x.itemsize, ivy_backend.to_numpy(x).itemsize, as_array=False
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
),
)
def test_array_property_mT(
dtype_x,
backend_fw,
test_flags,
):
_, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ret = helpers.flatten_and_to_np(ret=x.mT, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ivy_backend.matrix_transpose(data), backend=backend_fw
)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
ret_shape=True,
),
)
def test_array_property_ndim(
dtype_x,
backend_fw,
):
_, data, input_shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ivy_backend.utils.assertions.check_equal(
x.ndim, len(input_shape), as_array=False
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("complex")),
)
def test_array_property_real(
dtype_x,
backend_fw,
test_flags,
):
_, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ret = helpers.flatten_and_to_np(ret=x.real, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(ret=ivy_backend.real(x), backend=backend_fw)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend=test_flags.ground_truth_backend,
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
ret_shape=True,
),
)
def test_array_property_shape(
dtype_x,
backend_fw,
):
_, data, input_shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ivy_backend.utils.assertions.check_equal(
x.shape, ivy_backend.Shape(input_shape), as_array=False
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
ret_shape=True,
min_num_dims=1,
),
)
def test_array_property_size(
dtype_x,
backend_fw,
):
_, data, input_shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
size_gt = 1
for dim in input_shape:
size_gt *= dim
ivy_backend.utils.assertions.check_equal(x.size, size_gt, as_array=False)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_array_property_strides(dtype_x, backend_fw):
dtype, data = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
data = ivy_backend.native_array(data[0])
x = ivy_backend.Array(data)
ivy_backend.utils.assertions.check_equal(
x.strides, ivy_backend.to_numpy(x).strides, as_array=False
)
@handle_test(
fn_tree="functional.ivy.native_array", # dummy fn_tree
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_dim_size=3,
max_dim_size=3,
min_num_dims=3,
max_num_dims=3,
num_arrays=2,
min_value=3.0,
max_value=10.0,
),
op=st.sampled_from(
["!=", ">", "<", ">=", "<=", "*", "/", "%", "==", "&", "@", "**", "/"]
),
)
def test_dunder_wrapping(
dtype_x,
backend_fw,
test_flags,
op,
):
_, data = dtype_x
ivy.set_backend(backend_fw)
x = ivy.to_native(ivy.array(data[0]))
y = ivy.array(data[1])
assert ivy.is_ivy_array(y)
assert ivy.is_native_array(x)
res = eval(f"x {op} y")
assert ivy.is_ivy_array(res)
| ivy/ivy_tests/test_ivy/test_misc/test_array.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_array.py",
"repo_id": "ivy",
"token_count": 33985
} | 62 |
"""Collection of tests for the demos."""
# global
import pytest
# local
import ivy
import ivy.functional.backends.numpy
# functional api
def test_array(on_device):
import jax.numpy as jnp
assert ivy.concat((jnp.ones((1,)), jnp.ones((1,))), axis=-1).shape == (2,)
import tensorflow as tf
assert ivy.concat((tf.ones((1,)), tf.ones((1,))), axis=-1).shape == (2,)
import numpy as np
assert ivy.concat((np.ones((1,)), np.ones((1,))), axis=-1).shape == (2,)
import torch
assert ivy.concat((torch.ones((1,)), torch.ones((1,))), axis=-1).shape == (2,)
import paddle
assert ivy.concat((paddle.ones((1,)), paddle.ones((1,))), axis=-1).shape == (2,)
# Tests #
# ------#
# training
def test_training_demo(on_device, backend_fw):
if backend_fw == "numpy":
# numpy does not support gradients
pytest.skip()
ivy.set_backend(backend_fw)
class MyModel(ivy.Module):
def __init__(self):
self.linear0 = ivy.Linear(3, 64)
self.linear1 = ivy.Linear(64, 1)
ivy.Module.__init__(self)
def _forward(self, x):
x = ivy.relu(self.linear0(x))
return ivy.sigmoid(self.linear1(x))
model = MyModel()
optimizer = ivy.Adam(1e-4)
x_in = ivy.array([1.0, 2.0, 3.0])
target = ivy.array([0.0])
def loss_fn(v):
out = model(x_in, v=v)
return ivy.mean((out - target) ** 2)
for step in range(100):
loss, grads = ivy.execute_with_gradients(loss_fn, model.v)
model.v = optimizer.step(model.v, grads)
ivy.previous_backend()
| ivy/ivy_tests/test_ivy/test_misc/test_ivy_demos.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_ivy_demos.py",
"repo_id": "ivy",
"token_count": 737
} | 63 |
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import functools
import time
import os
import copy
import importlib
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import ivy
sns.set()
LINE_UP = "\033[1A"
LINE_CLEAR = "\x1b[2K"
COLUMNS = [
"exp no.",
"label",
"backend",
"device",
"eager time",
"graph time",
"percent_speed_up",
]
class _AvoidGPUPreallocation:
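    # context manager that prevents TF/JAX from pre-allocating all GPU memory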
def __init__(self, backend):
self._backend = backend
if backend == "tensorflow":
self.tf = importlib.import_module("tensorflow")
def __enter__(self):
if self._backend == "tensorflow":
gpus = self.tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
self.tf.config.experimental.set_memory_growth(gpu, True)
elif self._backend == "jax":
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
def __exit__(self, exc_type, exc_val, exc_tb):
if self._backend == "tensorflow":
gpus = self.tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
self.tf.config.experimental.set_memory_growth(gpu, True)
elif self._backend == "jax":
del os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"]
        if exc_type is not None:
            raise exc_val
        return self
def _move_to_device(args=None, kwargs=None, device="cpu"):
args_idxs = ivy.nested_argwhere(args, ivy.is_array)
kwargs_idxs = ivy.nested_argwhere(kwargs, ivy.is_array)
def func(x):
return ivy.to_device(x, device, out=x)
if args is not None:
args = ivy.map_nest_at_indices(args, args_idxs, func)
if kwargs is not None:
kwargs = ivy.map_nest_at_indices(kwargs, kwargs_idxs, func)
return args, kwargs
def _compute_time(fn: Callable) -> Callable:
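    # decorator returning the wall-clock runtime of fn in seconds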
@functools.wraps(fn)
def new_fn(*args, **kwargs):
start = time.time()
fn(*args, **kwargs)
end = time.time()
return round(end - start, 6)
return new_fn
def _read_or_create_csv(output_path="./report.csv"):
if not os.path.exists(output_path):
with open(output_path, "w") as f:
f.write(",".join(COLUMNS) + "\n")
return pd.read_csv(output_path)
def _write_to_csv(df, row_list, output_path="./report.csv"):
row = dict(zip(COLUMNS, row_list))
    df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
df.to_csv(output_path, index=False)
def eager_benchmark(
obj: Union[Callable, str],
functional_api: bool = False,
num_experiments: int = 1,
label: Optional[str] = None,
backends: Optional[List[str]] = None,
devices: Optional[List[str]] = None,
args: Optional[Tuple[Any]] = None,
kwargs: Optional[Dict[str, Any]] = None,
output_path="./report.csv",
):
"""Benchmark the function or module passed in input on the required
backends and devices.
Parameters
----------
obj
The function or module to be benchmarked with and without graph compilation.
In case of a function from ivy's functional API, this parameter would receive
a string which is the function name, along with functional_api set to True.
functional_api
Should only be set to ``True`` if the obj being passed is a part of ivy's
functional API. (Default value = ``False``).
num_experiments
Option to run benchmarking multiple times to account for subtle variations.
(Default value = 1).
label
The preferred name for the experiment as would be added to the csv. If no
name is provided, then the __name__ of the obj would be picked by default
(Default value = ``None``).
backends
A list of strings for backends to benchmark with. Should be among the backends
that ivy supports (Default value = ``None``).
devices
A list of target devices that ivy supports with the backends. The devices that
are invalid for a particular backend would be ignored
(Default value = ``None``).
args
The positional arguments to be passed to the obj.
kwargs
The keyword arguments to be passed to obj.
output_path
        The path to the csv file to write to. By default, results are written to
        report.csv in the folder from which the script is run
        (Default value = ``"./report.csv"``).
Examples
--------
With an :code:`ivy` function:
>>> import ivy
>>> from benchmark import eager_benchmark
>>> ivy.set_backend("torch")
>>> fn = "conv1d"
>>> args = (
... ivy.array([[[0.0], [3.0], [0.0]]], device="cpu"),
... ivy.array([[[0.0]], [[1.0]], [[0.0]]], device="cpu"),
... (1,),
... "SAME",
... )
>>> kwargs = {"data_format": "NWC", "dilations": (1,)}
>>> eager_benchmark(
... fn,
... label="conv1d",
... backends=["jax", "numpy", "tensorflow", "torch"],
... devices=["cpu", "gpu:0"],
... args=args,
... kwargs=kwargs,
... functional_api=True,
... output_path="./ivy/report.csv"
... )
With a compositional function:
>>> import ivy
>>> from benchmark import eager_benchmark
>>> ivy.set_backend("torch")
>>> def fn(*args, **kwargs):
... return ivy.conv1d(*args, **kwargs) + 1
>>> args = (
... ivy.array([[[0.0], [3.0], [0.0]]], device="cpu"),
... ivy.array([[[0.0]], [[1.0]], [[0.0]]], device="cpu"),
... (1,),
... "SAME",
... )
>>> kwargs = {"data_format": "NWC", "dilations": (1,)}
>>> eager_benchmark(
... fn,
... label="compos",
... backends=["jax", "numpy", "tensorflow", "torch"],
... devices=["cpu", "gpu:0"],
... args=args,
... kwargs=kwargs,
... output_path="./ivy/report.csv"
... )
With a module:
>>> import ivy
>>> from benchmark import eager_benchmark
>>> ivy.set_backend("torch")
>>> module = ivy.GELU(approximate=False)
>>> args = (ivy.random_uniform(shape=(4, 32)),)
>>> eager_benchmark(
... module,
... label="GELU",
... backends=["jax", "numpy", "tensorflow", "torch"],
... devices=["cpu", "gpu:0"],
... args=args,
... output_path="./ivy/report.csv"
... )
"""
backends = ivy.default(backends, [])
devices = ivy.default(devices, [])
output_path = ivy.default(output_path, "./report.csv")
print("\nBenchmarking backends : " + " ".join(backends))
print(f"Number of experiments : {num_experiments}" + "\n")
for i in range(num_experiments):
if num_experiments > 1:
print("====================")
print(f"Experiment {i + 1}")
print("====================\n")
for backend in backends:
with _AvoidGPUPreallocation(backend) as _:
print("------------------------------------------------\n")
print(f"backend : {backend}")
ivy.set_backend(backend, dynamic=True)
valid_devices = [
device
for device in devices
if device.split(":")[0] not in ivy.invalid_devices
]
for device in valid_devices:
print(f"device : {device}")
obj_call = obj
if functional_api:
obj_call = ivy.__dict__[obj]
                for dev_idx, device in enumerate(valid_devices):
args, kwargs = _move_to_device(
args=args, kwargs=kwargs, device=device
)
if isinstance(obj_call, ivy.Module):
obj_call_copy = copy.deepcopy(obj_call)
obj_call_copy.trace(args=args, kwargs=kwargs)
traced_fn = obj_call_copy
else:
traced_fn = ivy.trace(obj_call, args=args, kwargs=kwargs)
kwargs = ivy.default(kwargs, {})
args = ivy.default(args, ())
untraced_time = _compute_time(obj_call)(*args, **kwargs)
traced_time = _compute_time(traced_fn)(*args, **kwargs)
label = obj_call.__name__ if label is None else label
percent_speed_up = round(
abs(untraced_time - traced_time) / untraced_time * 100, 6
)
df = _read_or_create_csv(output_path)
_write_to_csv(
df,
[
len(df.index),
label,
backend,
device,
untraced_time,
traced_time,
percent_speed_up,
],
output_path,
)
args, kwargs = _move_to_device(
args=args, kwargs=kwargs, device="cpu"
)
ivy.clear_cached_mem_on_dev(device)
                    print(LINE_UP * (len(valid_devices) - dev_idx), end=LINE_CLEAR)
print(f"device : {device}\t --> done\n")
ivy.unset_backend()
print(f"Results written to {output_path} ...")
def visualize_speed_up(
file_path: Optional[str] = None,
output_path: Optional[str] = None,
devices: Union[List[str], str] = "all",
backends: Union[List[str], str] = "all",
labels: Optional[Union[List[str], str]] = None,
):
"""Visualize the speed up results stored in the csv.
Parameters
----------
file_path
The path of the csv file where the results are stored.
output_path
The path to the png file to store the graphs in.
devices
A filter for the devices for which graphs should be generated.
backends
A filter for the backends for which graphs should be generated.
labels
A filter for the labels for which graphs should be generated.
Examples
--------
Visualize for given set of devices and backends:
>>> from benchmark import visualize_speed_up
>>> visualize_speed_up(
... file_path="./ivy/report.csv",
... output_path="./ivy/save_fig.png",
... backends=["torch", "jax"],
... devices=["cpu", "gpu:0"],
... )
Visualize for a specific experiment label:
>>> from benchmark import visualize_speed_up
>>> visualize_speed_up(
... file_path="./ivy/report.csv",
... output_path="./ivy/save_fig.png",
... backends=["jax"],
... devices=["gpu:0"],
... labels=["GELU"],
... )
"""
file_path = ivy.default(file_path, "./report.csv")
output_path = ivy.default(output_path, "./saved_fig.png")
df = pd.read_csv(file_path)
df = df.query("label in @labels") if labels is not None else df
backends = list(df["backend"].unique()) if backends == "all" else backends
devices = list(df["device"].unique()) if devices == "all" else devices
fig, axes = plt.subplots(len(devices), len(backends))
fig.set_figwidth(30)
fig.set_figheight(12)
fig.tight_layout(pad=10.0)
axes = axes if isinstance(axes, np.ndarray) else np.asarray([axes])
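    # ensure `axes` is a 2-D grid (devices x backends) even when matplotlib
    # returns a single Axes object or a 1-D array of subplots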
while len(axes.shape) < 2:
if len(devices) > len(backends):
axes = np.expand_dims(axes, len(axes.shape))
else:
axes = np.expand_dims(axes, 0)
for device, axis in zip(devices, axes):
for backend, ax in zip(backends, axis):
ax.set_title(f"{backend} : {device}", {"fontsize": 18})
ax.set_ylabel("Percent Speed up on compiling", {"fontsize": 18})
ax.tick_params(axis="both", labelsize=15)
query = df.query("backend == @backend and device == @device")
if not query.empty:
ax.violinplot(query["percent_speed_up"])
else:
warnings.warn(
f"No records matching the filters passedbackend={backend} and"
f" device={device}"
)
plt.savefig(output_path)
print(f"plot saved to {output_path} ...")
| ivy/scripts/eager_mode_benchmark/benchmark.py/0 | {
"file_path": "ivy/scripts/eager_mode_benchmark/benchmark.py",
"repo_id": "ivy",
"token_count": 5837
} | 64 |
import sys
run = int(sys.argv[1])
backends = ["numpy", "jax", "tensorflow", "torch"]
submodules = [
"creation",
"device",
"dtype",
"elementwise",
"general",
"gradients",
"linalg",
"manipulation",
"meta",
"nest",
"random",
"searching",
"set",
"sorting",
"statistical",
"utility",
]
N = len(backends)
M = len(submodules)
num_tests = N * M
run %= num_tests
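# map the flattened run index onto a (backend, submodule) pair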
i = run // M
j = run % M
backend = backends[i]
submodule = submodules[j]
with open("./fwsubmod.txt", "w") as outfile:
outfile.write(f"{backend}-{submodule}")
with open("./backend.txt", "w") as f:
f.write(f"{backend}")
with open("./submodule.txt", "w") as f:
f.write(f"test_{submodule}")
| ivy/scripts/setup_tests/run_ivy_core_test.py/0 | {
"file_path": "ivy/scripts/setup_tests/run_ivy_core_test.py",
"repo_id": "ivy",
"token_count": 341
} | 65 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/ivy.iml" filepath="$PROJECT_DIR$/.idea/ivy.iml" />
</modules>
</component>
</project>
| ivy/.idea/modules.xml/0 | {
"file_path": "ivy/.idea/modules.xml",
"repo_id": "ivy",
"token_count": 105
} | 0 |
import sys
from pymongo import MongoClient
action_url = "https://github.com/unifyai/ivy/actions/runs/"
test_configs = {
"test-array-api": ["array_api", 0],
"test-core-ivy": ["ivy_core", 1],
"test-nn-ivy": ["ivy_nn", 2],
"test-stateful-ivy": ["ivy_stateful", 3],
"test-frontend-tensorflow-push": ["tf_frontend", 4],
"test-frontend-numpy-push": ["numpy_frontend", 5],
"test-frontend-jax-push": ["jax_frontend", 6],
"test-frontend-torch-push": ["torch_frontend", 7],
"test-experimental-core-ivy": ["experimental_core", 8],
"test-experimental-nn-ivy": ["experimental_nn", 9],
}
result_config = {
"success": "https://img.shields.io/badge/-success-success",
"failure": "https://img.shields.io/badge/-failure-red",
}
def make_clickable(url, name):
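    # render an HTML badge image that links to the GitHub Actions run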
return (
f'<a href="{url}" rel="noopener noreferrer" '
+ f'target="_blank"><img src={name}></a>'
)
def update_test_results():
key, workflow, fw_submod, result, run_id = (
str(sys.argv[1]),
str(sys.argv[2]),
str(sys.argv[3]),
str(sys.argv[4]),
str(sys.argv[5]),
)
backend = fw_submod.split("-")[0]
submodule = fw_submod.split("-")[1]
cluster = MongoClient(
f"mongodb+srv://deep-ivy:{key}@cluster0.qdvf8q3.mongodb.net/?retryWrites=true&w=majority" # noqa
)
db = cluster["Ivy_tests_multi"]
collection = db[test_configs[workflow][0]]
res = make_clickable(action_url + run_id, result_config[result])
collection.update_one(
{"_id": test_configs[workflow][1]},
{"$set": {f"{backend}.{submodule}": res}},
upsert=True,
)
return
if __name__ == "__main__":
update_test_results()
| ivy/automation_tools/dashboard_automation/update_db.py/0 | {
"file_path": "ivy/automation_tools/dashboard_automation/update_db.py",
"repo_id": "ivy",
"token_count": 795
} | 1 |
{
"tensorflow": ["tensorflow-macos", "tensorflow-probability"],
"jax": ["jaxlib", "dm-haiku", "flax"],
"torch": ["torch-scatter"],
"mxnet": ["mxnet"]
}
| ivy/docker/requirement_mappings_apple_silicon.json/0 | {
"file_path": "ivy/docker/requirement_mappings_apple_silicon.json",
"repo_id": "ivy",
"token_count": 81
} | 2 |
Helpful Resources
=================
Here, we list out a few resources that you may find helpful when climbing the steep Ivy learning curve.
**Docs for respective Backends**
`Tensorflow Documentation <https://www.tensorflow.org/api_docs>`_, `PyTorch Documentation <https://pytorch.org/docs>`_, `NumPy Documentation <https://numpy.org/doc/stable/reference/>`_ and `Jax Documentation <https://jax.readthedocs.io/>`_ are the most useful resources for understanding the behaviours of the different backends.
These are the most important resources when working on Docstrings, Ivy Frontends, and Ivy Frontends tests.
**Python - Reference**
`realpython <https://realpython.com/>`_ and `pynative <https://pynative.com/>`_ are very useful for any kind of help regarding Python.
**Stack Exchange/ Stack Overflow**
A good platform to search for any sort of information regarding python and ML.
Useful when working on almost any section in the Deep Dive.
**Co-Pilot**
GitHub Co-Pilot can be used to write any bit of code in Ivy.
It is often very useful when developing code and helps get things done faster.
**GitHub - Reference**
`Git docs <https://git-scm.com/doc>`_ is the first place you must head to when you are stuck with any issue related to git.
**IDE extension for spell checking**
Though this may sound odd, a spell-checking extension is very useful to people contributing to Ivy when adding docstrings.
**Docker**
`Docker Documentation <https://docs.docker.com/>`_ is the best place to learn more about docker.
**Github Actions**
`GitHub Actions <https://docs.github.com/en/actions>`_ can be the best place to understand Continuous Integration and how testing is done to keep our repo error free.
| ivy/docs/overview/contributing/helpful_resources.rst/0 | {
"file_path": "ivy/docs/overview/contributing/helpful_resources.rst",
"repo_id": "ivy",
"token_count": 467
} | 3 |
Exception Handling
==================
.. _`exception handling thread`: https://discord.com/channels/799879767196958751/1189908450570928149
.. _`discord`: https://discord.gg/sXyFF8tDtm
As Ivy is unifying multiple backends, various issues are seen during exception handling:
#. each backend throws its own exceptions
#. exceptions thrown are backend-specific, therefore inconsistent
To unify the handling of exceptions and assertions, Ivy includes a custom exception class and decorator, which are explained further in the following sub-sections.
Ivy Exception Class
-------------------
Firstly, Ivy's base exception class is :code:`IvyException` class, which inherits from the Python :code:`Exception` class.
.. code-block:: python
# in ivy/utils/exceptions.py
class IvyException(Exception):
def __init__(self, *messages, include_backend=False):
self.native_error = (
messages[0]
if len(messages) == 1
and isinstance(messages[0], Exception)
and not include_backend
else None
)
if self.native_error is None:
super().__init__(
_combine_messages(*messages, include_backend=include_backend)
)
else:
super().__init__(str(messages[0]))
In cases where an exception class for a specific purpose is required, we inherit from the :code:`IvyException` class.
For example, the :code:`IvyBackendException` class is created to unify backend exceptions.
.. code-block:: python
# in ivy/utils/exceptions.py
class IvyBackendException(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
In some Array API tests, :code:`IndexError` and :code:`ValueError` are explicitly tested to ensure that the functions are behaving correctly.
Thus, the :code:`IvyIndexError` and :code:`IvyValueError` classes unify these special cases.
For a more general case, the :code:`IvyError` class can be used.
.. code-block:: python
# in ivy/utils/exceptions.py
class IvyError(IvyException):
def __init__(self, *messages, include_backend=False):
super().__init__(*messages, include_backend=include_backend)
More Custom Exception classes were created to unify sub-categories of errors. We try our best to ensure that the same type of
Exception is raised for the same type of Error regardless of the backend.
This will ensure that the exceptions are truly unified for all the different types of errors.
The implementations of these custom classes are exactly the same as :code:`IvyError` class.
Currently there are 5 custom exception classes in ivy.
1. :code:`IvyIndexError`: This Error is raised for anything Indexing related. For Instance, providing out of bound axis in any function.
2. :code:`IvyValueError`: This is for anything related to providing wrong values. For instance, passing :code:`high` value
smaller than :code:`low` value in :code:`ivy.random_uniform`.
3. :code:`IvyAttributeError`: This is raised when an undefined attribute is referenced.
4. :code:`IvyBroadcastShapeError`: This is raised whenever 2 shapes are expected to be broadcastable but are not.
5. :code:`IvyDtypePromotionError`: Similar to :code:`IvyBroadcastShapeError`, this is raised when 2 dtypes are expected to be promotable but are not.
The correct type of Exception class should be used for the corresponding type of error across the backends. This will truly unify all the exceptions raised in Ivy.
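As a quick illustration, the snippet below is a minimal sketch (assuming a JAX
backend is installed) of the unified behaviour: the same out-of-bounds axis
used in the stack-trace examples that follow is caught as a single
:code:`IvyIndexError` rather than as a backend-specific error.
.. code-block:: python
    import ivy
    ivy.set_backend("jax")
    try:
        ivy.all(ivy.array([1, 2, 3]), axis=2)
    except ivy.utils.exceptions.IvyIndexError as e:
        print(type(e).__name__)  # IvyIndexError, regardless of the backend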
Configurable Mode for Stack Trace
---------------------------------
Ivy's transpilation nature allows users to write code in their preferred frontend
framework and then execute it with a different backend framework. For example, a
user who is comfortable with NumPy can use Ivy's NumPy frontend to run their code
with a JAX backend. However, since they may have no prior experience with JAX or
other backend frameworks, they may not want to encounter stack traces that traverse
Ivy and JAX functions. In such cases, it may be preferable for the user to avoid
encountering stack traces that extend through Ivy and JAX functions.
Therefore, options are made available to either truncate the stack traces
at the frontend or Ivy level, or to apply no truncation at all.
Let's look at the 3 different modes with an example of :code:`ivy.all` below!
1. Full
This is the default mode and keeps the complete stack traces. All :code:`numpy`
frontend, ivy specific, and native :code:`jax` stack traces are displayed.
The format of the error displayed in this mode is :code:`Ivy error: backend name: backend function name: native error: error message`
.. code-block:: none
>>> ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
File "/ivy/ivy/utils/exceptions.py", line 198, in _handle_exceptions
return fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 911, in _handle_nestable
return fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 392, in _handle_array_like_without_promotion
return fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 805, in _handle_out_argument
return fn(*args, out=out, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 432, in _inputs_to_native_arrays
return fn(*new_args, **new_kwargs)
File "/ivy/ivy/func_wrapper.py", line 535, in _outputs_to_ivy_arrays
ret = fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 349, in _handle_array_function
return fn(*args, **kwargs)
File "/ivy/ivy/functional/backends/jax/utility.py", line 22, in all
raise ivy.utils.exceptions.IvyIndexError(error)
During the handling of the above exception, another exception occurred:
File "/ivy/other_test.py", line 22, in <module>
ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 523, in _handle_numpy_out
return fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 396, in _outputs_to_numpy_arrays
ret = fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 352, in _inputs_to_ivy_arrays_np
return fn(*ivy_args, **ivy_kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 453, in _from_zero_dim_arrays_to_scalar
ret = fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/logic/truth_value_testing.py", line 24, in all
ret = ivy.all(a, axis=axis, keepdims=keepdims, out=out)
File "/ivy/ivy/utils/exceptions.py", line 217, in _handle_exceptions
raise ivy.utils.exceptions.IvyIndexError(
IvyIndexError: jax: all: ValueError: axis 2 is out of bounds for an array of dimension 1
2. Frontend-only
This option displays only frontend-related stack traces. If compared with the
stack traces in the :code:`full` mode above, the :code:`jax` related traces
are pruned. Only the :code:`numpy` frontend related errors are shown.
A message is also displayed to inform that the traces are truncated and
the instructions to switch it back to the :code:`full` mode is included.
In this case, the format of the error is :code:`Ivy error: backend name: backend function name: error message`
.. code-block:: none
>>> ivy.set_exception_trace_mode('frontend')
>>> ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
<stack trace is truncated to frontend specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
During the handling of the above exception, another exception occurred:
<stack trace is truncated to frontend specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 523, in _handle_numpy_out
return fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 396, in _outputs_to_numpy_arrays
ret = fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 352, in _inputs_to_ivy_arrays_np
return fn(*ivy_args, **ivy_kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 453, in _from_zero_dim_arrays_to_scalar
ret = fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/logic/truth_value_testing.py", line 24, in all
ret = ivy.all(a, axis=axis, keepdims=keepdims, out=out)
IvyIndexError: jax: all: axis 2 is out of bounds for an array of dimension 1
3. Ivy specific
This option displays only ivy-related stack traces. If compared to the different
stack traces modes above, the ivy backend :code:`jax` related
traces (which were hidden in the :code:`frontend` mode) are available again
and the ivy frontend :code:`numpy` related traces remain visible.
However, the native :code:`jax` traces remain hidden because they are not
ivy-specific.
A message is also displayed to inform that the traces are truncated and the
instructions to switch it back to the :code:`full` mode is included.
The format of the error displayed is the same as the :code:`frontend` mode above.
.. code-block:: none
>>> ivy.set_exception_trace_mode('ivy')
>>> ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
<stack trace is truncated to ivy specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
File "/ivy/ivy/utils/exceptions.py", line 198, in _handle_exceptions
return fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 911, in _handle_nestable
return fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 392, in _handle_array_like_without_promotion
return fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 805, in _handle_out_argument
return fn(*args, out=out, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 432, in _inputs_to_native_arrays
return fn(*new_args, **new_kwargs)
File "/ivy/ivy/func_wrapper.py", line 535, in _outputs_to_ivy_arrays
ret = fn(*args, **kwargs)
File "/ivy/ivy/func_wrapper.py", line 349, in _handle_array_function
return fn(*args, **kwargs)
File "/ivy/ivy/functional/backends/jax/utility.py", line 22, in all
raise ivy.utils.exceptions.IvyIndexError(error)
During the handling of the above exception, another exception occurred:
<stack trace is truncated to ivy specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
File "/ivy/other_test.py", line 21, in <module>
ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 523, in _handle_numpy_out
return fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 396, in _outputs_to_numpy_arrays
ret = fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 352, in _inputs_to_ivy_arrays_np
return fn(*ivy_args, **ivy_kwargs)
File "/ivy/ivy/functional/frontends/numpy/func_wrapper.py", line 453, in _from_zero_dim_arrays_to_scalar
ret = fn(*args, **kwargs)
File "/ivy/ivy/functional/frontends/numpy/logic/truth_value_testing.py", line 24, in all
ret = ivy.all(a, axis=axis, keepdims=keepdims, out=out)
File "/ivy/ivy/utils/exceptions.py", line 217, in _handle_exceptions
raise ivy.utils.exceptions.IvyIndexError(
IvyIndexError: jax: all: axis 2 is out of bounds for an array of dimension 1
Ivy :code:`func_wrapper` Pruning
--------------------------------
Due to the wrapping operations in Ivy, a long list of less informative
:code:`func_wrapper` traces is often seen in the stack.
Including all of these wrapper functions in the stack trace can be very
unwieldy, so they can be hidden entirely by setting
:code:`ivy.set_show_func_wrapper_trace_mode(False)`.
Examples are shown below to demonstrate the combination of this mode with the
3 different stack trace modes explained above.
1. Full
The :code:`func_wrapper` related traces have been hidden. All other traces
such as ivy-specific, frontend-related and the native traces remain visible.
A message is displayed as well to the user so that they are aware of the
pruning. The instructions to recover the :code:`func_wrapper` traces are
shown too.
.. code-block:: none
>>> ivy.set_show_func_wrapper_trace_mode(False)
>>> ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
<func_wrapper.py stack trace is squashed, call `ivy.set_show_func_wrapper_trace_mode(True)` in order to view this>
File "/ivy/ivy/utils/exceptions.py", line 198, in _handle_exceptions
return fn(*args, **kwargs)
File "/ivy/ivy/functional/backends/jax/utility.py", line 22, in all
raise ivy.utils.exceptions.IvyIndexError(error)
During the handling of the above exception, another exception occurred:
<func_wrapper.py stack trace is squashed, call `ivy.set_show_func_wrapper_trace_mode(True)` in order to view this>
File "/ivy/other_test.py", line 22, in <module>
ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
File "/ivy/ivy/functional/frontends/numpy/logic/truth_value_testing.py", line 24, in all
ret = ivy.all(a, axis=axis, keepdims=keepdims, out=out)
File "/ivy/ivy/utils/exceptions.py", line 217, in _handle_exceptions
raise ivy.utils.exceptions.IvyIndexError(
IvyIndexError: jax: all: ValueError: axis 2 is out of bounds for an array of dimension 1
2. Frontend-only
In the frontend-only stack trace mode, the ivy backend wrapping traces are
already hidden, but the frontend wrappers remain visible. By disabling the
func wrapper trace mode, the frontend wrappers are hidden as well. This can be
observed in the example below.
.. code-block:: none
>>> ivy.set_exception_trace_mode('frontend')
>>> ivy.set_show_func_wrapper_trace_mode(False)
>>> ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
<stack trace is truncated to frontend specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
<func_wrapper.py stack trace is squashed, call `ivy.set_show_func_wrapper_trace_mode(True)` in order to view this>
During the handling of the above exception, another exception occurred:
<stack trace is truncated to frontend specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
<func_wrapper.py stack trace is squashed, call `ivy.set_show_func_wrapper_trace_mode(True)` in order to view this>
File "/ivy/ivy/functional/frontends/numpy/logic/truth_value_testing.py", line 24, in all
ret = ivy.all(a, axis=axis, keepdims=keepdims, out=out)
IvyIndexError: jax: all: axis 2 is out of bounds for an array of dimension 1
3. Ivy specific
As the wrappers occur in :code:`ivy` itself, all backend and frontend wrappers
remain visible in the ivy-specific mode. By hiding the func wrapper traces,
the stack becomes cleaner and displays the ivy backend and frontend
exception messages only.
.. code-block:: none
    >>> ivy.set_exception_trace_mode('ivy')
>>> ivy.set_show_func_wrapper_trace_mode(False)
>>> ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
<stack trace is truncated to ivy specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
<func_wrapper.py stack trace is squashed, call `ivy.set_show_func_wrapper_trace_mode(True)` in order to view this>
File "/ivy/ivy/utils/exceptions.py", line 198, in _handle_exceptions
return fn(*args, **kwargs)
File "/ivy/ivy/functional/backends/jax/utility.py", line 22, in all
raise ivy.utils.exceptions.IvyIndexError(error)
During the handling of the above exception, another exception occurred:
<stack trace is truncated to ivy specific files, call `ivy.set_exception_trace_mode('full')` to view the full trace>
<func_wrapper.py stack trace is squashed, call `ivy.set_show_func_wrapper_trace_mode(True)` in order to view this>
File "/ivy/other_test.py", line 22, in <module>
ivy.functional.frontends.numpy.all(ivy.array([1,2,3]), axis=2)
File "/ivy/ivy/functional/frontends/numpy/logic/truth_value_testing.py", line 24, in all
ret = ivy.all(a, axis=axis, keepdims=keepdims, out=out)
File "/ivy/ivy/utils/exceptions.py", line 217, in _handle_exceptions
raise ivy.utils.exceptions.IvyIndexError(
IvyIndexError: jax: all: axis 2 is out of bounds for an array of dimension 1
:code:`@handle_exceptions` Decorator
------------------------------------
To ensure that all backend exceptions are caught properly, a decorator is used to handle functions in the :code:`try/except` block.
.. code-block:: python
# in ivy/utils/exceptions.py
def handle_exceptions(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_exceptions(*args, **kwargs):
try:
return fn(*args, **kwargs)
# Not to rethrow as IvyBackendException
except IvyNotImplementedException as e:
raise e
except IvyError as e:
_print_traceback_history()
raise ivy.utils.exceptions.IvyError(fn.__name__, e, include_backend=True)
except IvyBroadcastShapeError as e:
_print_traceback_history()
raise ivy.utils.exceptions.IvyBroadcastShapeError(
fn.__name__, e, include_backend=True
)
except IvyDtypePromotionError as e:
_print_traceback_history()
raise ivy.utils.exceptions.IvyDtypePromotionError(
fn.__name__, e, include_backend=True
)
except (IndexError, IvyIndexError) as e:
_print_traceback_history()
raise ivy.utils.exceptions.IvyIndexError(
fn.__name__, e, include_backend=True
)
except (AttributeError, IvyAttributeError) as e:
_print_traceback_history()
raise ivy.utils.exceptions.IvyAttributeError(
fn.__name__, e, include_backend=True
)
except (ValueError, IvyValueError) as e:
_print_traceback_history()
raise ivy.utils.exceptions.IvyValueError(
fn.__name__, e, include_backend=True
)
except (Exception, IvyBackendException) as e:
_print_traceback_history()
raise ivy.utils.exceptions.IvyBackendException(
fn.__name__, e, include_backend=True
)
_handle_exceptions.handle_exceptions = True
return _handle_exceptions
The decorator is then added to each function for wrapping.
Let's look at an example of :func:`ivy.all`.
.. code-block:: python
# in ivy/functional/ivy/utility.py
@handle_exceptions
def all(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
return ivy.current_backend(x).all(x, axis=axis, keepdims=keepdims, out=out)
When a backend throws an exception, it will be caught in the decorator and then the appropriate Error will be raised.
This ensures that all exceptions are consistent.
Let's look at the comparison of before and after adding the decorator.
**without decorator**
In NumPy,
.. code-block:: none
>>> x = ivy.array([0,0,1])
>>> ivy.all(x, axis=2)
<error_stack>
numpy.AxisError: axis 2 is out of bounds for an array of dimension 1
In PyTorch,
.. code-block:: none
>>> x = ivy.array([0,0,1])
>>> ivy.all(x, axis=2)
<error_stack>
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 2)
The errors raised differ across backends, which is confusing and inconsistent.
**with decorator**
In NumPy,
.. code-block:: none
>>> x = ivy.array([0,0,1])
>>> ivy.all(x, axis=2)
<error_stack>
IvyIndexError: numpy: all: AxisError: axis 2 is out of bounds for an array of dimension 1
In PyTorch,
.. code-block:: none
>>> x = ivy.array([0,0,1])
>>> ivy.all(x, axis=2)
<error_stack>
IvyIndexError: torch: all: IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 2)
The errors are unified into an :code:`IvyIndexError`, with the current backend and function stated to provide clearer information.
The message string is inherited from the native exception.
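Because every backend raises the same unified exception type for a given class of error, downstream code only needs a single :code:`except` clause regardless of the backend that is set. Below is a minimal sketch of this, assuming the :code:`torch` backend is installed; the same clause works unchanged for any other backend.
.. code-block:: python
    import ivy
    ivy.set_backend("torch")
    x = ivy.array([0, 0, 1])
    try:
        ivy.all(x, axis=2)
    except ivy.utils.exceptions.IvyIndexError as e:
        # the same except clause catches the unified error under numpy, jax, etc.
        print(f"caught: {e}")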
Consistency in Errors
---------------------
For consistency, we make sure that the same type of Exception is raised for the same type of error regardless of the backend set.
Let's take an example of :func:`ivy.all` again. In Jax, :code:`ValueError` is raised when the axis is out of bounds,
and for Numpy, :code:`AxisError` is raised. To unify the behaviour, we raise :code:`IvyIndexError` for both cases.
In Numpy,
.. code-block:: python
# in ivy/functional/backends/numpy/utility.py
def all(
x: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
try:
return np.asarray(np.all(x, axis=axis, keepdims=keepdims, out=out))
        except np.AxisError as error:
            raise ivy.utils.exceptions.IvyIndexError(error)
In Jax,
.. code-block:: python
# in ivy/functional/backends/jax/utility.py
def all(
x: JaxArray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
x = jnp.array(x, dtype="bool")
try:
return jnp.all(x, axis, keepdims=keepdims)
except ValueError as error:
raise ivy.utils.exceptions.IvyIndexError(error)
In both cases, :code:`IvyIndexError` is raised, to make sure the same type of Exception is raised for this specific error.
Assertion Function
------------------
There are often conditions or limitations needed to ensure that a function works correctly.
Inconsistency arises when some functions:
#. use :code:`assert` for checks and throw :code:`AssertionError`, or
#. use :code:`if/elif/else` blocks and raise :code:`Exception`, :code:`ValueError`, etc.
To unify the behaviours, our policy is to use conditional blocks and raise :code:`IvyException` whenever a check is required.
Moreover, to reduce code redundancy, conditions which are commonly used are collected as helper functions with custom parameters in :mod:`ivy/utils/assertions.py`.
This allows them to be reused and promotes cleaner code.
Let's look at an example!
**Helper: check_less**
.. code-block:: python
# in ivy/utils/assertions.py
def check_less(x1, x2, allow_equal=False, message=""):
# less_equal
if allow_equal and ivy.any(x1 > x2):
            raise ivy.utils.exceptions.IvyException(
f"{x1} must be lesser than or equal to {x2}"
if message == ""
else message
)
# less
elif not allow_equal and ivy.any(x1 >= x2):
            raise ivy.utils.exceptions.IvyException(
f"{x1} must be lesser than {x2}"
if message == ""
else message
)
**ivy.set_split_factor**
.. code-block:: python
# in ivy/functional/ivy/device.py
@handle_exceptions
def set_split_factor(
factor: float,
device: Union[ivy.Device, ivy.NativeDevice] = None,
/,
) -> None:
        ivy.utils.assertions.check_less(0, factor, allow_equal=True)
global split_factors
device = ivy.default(device, default_device())
split_factors[device] = factor
Instead of coding a conditional block and raising an exception if the conditions are not met, a helper function is used to simplify the logic and increase code readability.
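The helpers can also be called directly wherever a check is needed. A minimal sketch, assuming the module path :code:`ivy.utils.assertions` from the file header above:
.. code-block:: python
    import ivy
    # passes silently, since ivy.any(0 > 0.5) is False
    ivy.utils.assertions.check_less(0, 0.5, allow_equal=True)
    # raises IvyException, since ivy.any(0 > -1.0) is True
    ivy.utils.assertions.check_less(0, -1.0, allow_equal=True)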
**Round Up**
This should have hopefully given you a good feel for how exceptions are handled and unified in Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `exception handling thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/eTc24eG9P_s" class="video">
</iframe>
| ivy/docs/overview/deep_dive/exception_handling.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/exception_handling.rst",
"repo_id": "ivy",
"token_count": 9289
} | 4 |
Building Blocks
===============
Here we explain the components of Ivy which are fundamental to its usage either as a code converter or as a fully-fledged framework-agnostic ML framework.
These are the 4 parts labelled as (a) in the image below:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/submodule_dependency_graph.png?raw=true
:align: center
:width: 100%
Backend Functional APIs ✅
--------------------------
The first important point to make is that Ivy does not implement its own C++ or CUDA backend.
Instead, Ivy **wraps** the functional APIs of existing frameworks, bringing them into syntactic and semantic alignment.
Let’s take the function :func:`ivy.stack` as an example.
There are separate backend modules for JAX, TensorFlow, PyTorch, and NumPy, and so we implement the :code:`stack` method once for each backend, each in separate backend files like so:
.. code-block:: python
# ivy/functional/backends/jax/manipulation.py:
def stack(
arrays: Union[Tuple[JaxArray], List[JaxArray]],
/,
*,
axis: int = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.stack(arrays, axis=axis)
.. code-block:: python
# ivy/functional/backends/numpy/manipulation.py:
def stack(
arrays: Union[Tuple[np.ndarray], List[np.ndarray]],
/,
*,
axis: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.stack(arrays, axis, out=out)
stack.support_native_out = True
.. code-block:: python
# ivy/functional/backends/tensorflow/manipulation.py:
def stack(
arrays: Union[Tuple[tf.Tensor], List[tf.Tensor]],
/,
*,
axis: int = 0,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.stack(arrays, axis)
.. code-block:: python
# ivy/functional/backends/torch/manipulation.py:
def stack(
arrays: Union[Tuple[torch.Tensor], List[torch.Tensor]],
/,
*,
axis: int = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.stack(arrays, axis, out=out)
stack.support_native_out = True
There were no changes required for this function; however, NumPy and PyTorch both had to be marked as supporting the :ref:`overview/deep_dive/inplace_updates:out argument` natively.
For more complicated functions, we need to do more than simply wrap and maybe change the name.
For functions with differing behavior, we must modify the function to fit the unified in-out behavior of Ivy’s API.
For example, the APIs of JAX, PyTorch, and NumPy all have a :code:`logspace` method, but TensorFlow does not at the time of writing.
Therefore, we need to construct it using a composition of existing TensorFlow ops like so:
.. code-block:: python
# ivy/functional/backends/tensorflow/creation.py:
def logspace(
start: Union[tf.Tensor, tf.Variable, int],
stop: Union[tf.Tensor, tf.Variable, int],
num: int,
base: float = 10.0,
axis: Optional[int] = None,
*,
dtype: tf.DType,
device: str,
) -> Union[tf.Tensor, tf.Variable]:
power_seq = ivy.linspace(start, stop, num, axis, dtype=dtype, device=device)
return base**power_seq
Ivy Functional API ✅
---------------------
Calling the different backend files explicitly would work okay, but it would mean we need to :code:`import ivy.functional.backends.torch as ivy` to use a PyTorch backend or :code:`import ivy.functional.backends.tensorflow as ivy` to use a TensorFlow backend.
Instead, we allow these backends to be bound to the single shared namespace ivy.
The backend can then be changed by calling :code:`ivy.set_backend('torch')` for example.
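As a quick sketch of what switching backends looks like in practice (assuming both frameworks are installed), the very same :func:`ivy.stack` call dispatches to a different backend implementation after each switch:
.. code-block:: python
    import ivy
    ivy.set_backend("torch")
    x = ivy.stack([ivy.array([1.0]), ivy.array([2.0])], axis=0)  # runs torch.stack
    ivy.set_backend("numpy")
    y = ivy.stack([ivy.array([1.0]), ivy.array([2.0])], axis=0)  # runs np.stack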
:mod:`ivy.functional.ivy` is the submodule where all the doc strings and argument typing reside for the functional Ivy API.
For example, the function :func:`prod` is shown below:
.. code-block:: python
# ivy/functional/ivy/elementwise.py:
@to_native_arrays_and_back
@handle_out_argument
@handle_nestable
def prod(
x: Union[ivy.Array, ivy.NativeArray],
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the product of input array x elements.
x
input array. Should have a numeric data type.
axis
axis or axes along which products must be computed. By default, the product must
be computed over the entire array. If a tuple of integers, products must be
computed over multiple axes. Default: ``None``.
keepdims
bool, if True, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with the
input array (see Broadcasting). Otherwise, if False, the reduced axes
(dimensions) must not be included in the result. Default: ``False``.
dtype
data type of the returned array. If None,
if the default data type corresponding to the data type “kind” (integer or
floating-point) of x has a smaller range of values than the data type of x
(e.g., x has data type int64 and the default data type is int32, or x has data
type uint64 and the default data type is int64), the returned array must have
the same data type as x. if x has a floating-point data type, the returned array
must have the default floating-point data type. if x has a signed integer data
type (e.g., int16), the returned array must have the default integer data type.
if x has an unsigned integer data type (e.g., uint16), the returned array must
have an unsigned integer data type having the same number of bits as the default
integer data type (e.g., if the default integer data type is int32, the returned
array must have a uint32 data type). If the data type (either specified or
resolved) differs from the data type of x, the input array should be cast to the
specified data type before computing the product. Default: ``None``.
out
optional output array, for writing the result to.
Returns
-------
ret
array, if the product was computed over the entire array, a zero-dimensional
array containing the product; otherwise, a non-zero-dimensional array containing
the products. The returned array must have a data type as described by the dtype
parameter above.
>>> x = ivy.array([1, 2, 3])
>>> z = ivy.prod(x)
>>> print(z)
ivy.array(6)
>>> x = ivy.array([1, 0, 3])
>>> z = ivy.prod(x)
>>> print(z)
ivy.array(0)
"""
return current_backend(x).prod(
x, axis=axis, dtype=dtype, keepdims=keepdims, out=out
)
Implicitly, Ivy sets numpy as the default backend or operates with the backend corresponding to the specified data inputs
until the user explicitly sets a different backend.
The examples can be seen below:
+----------------------------------------+----------------------------------------------------+
| | |
|.. code-block:: python |.. code-block:: python |
| | |
| # implicit | # explicit |
| import ivy | import ivy |
| x = ivy.array([1, 2, 3]) | ivy.set_backend("jax") |
| (type(ivy.to_native(x))) | |
| # -> <class 'numpy.ndarray'>           | z = ivy.array([1, 2, 3])                           |
| | type(ivy.to_native(z)) |
| import torch | # -> <class 'jaxlib.xla_extension.DeviceArray'> |
| t = torch.tensor([23,42, -1]) | |
| type(ivy.to_native(ivy.sum(t))) | |
| # -> <class 'torch.Tensor'> | |
+----------------------------------------+----------------------------------------------------+
This implicit backend selection, and the use of a shared global ivy namespace for all backends, are both made possible via the backend handler.
Backend Handler ✅
------------------
All code for setting and unsetting the backend resides in the submodule at :mod:`ivy/utils/backend/handler.py`, and the front facing function is :func:`ivy.current_backend`.
The contents of this function are as follows:
.. code-block:: python
# ivy/utils/backend/handler.py
def current_backend(*args, **kwargs):
global implicit_backend
# if a global backend has been set with set_backend then this will be returned
if backend_stack:
f = backend_stack[-1]
if verbosity.level > 0:
verbosity.cprint(f"Using backend from stack: {f}")
return f
# if no global backend exists, we try to infer the backend from the arguments
f = _determine_backend_from_args(list(args) + list(kwargs.values()))
if f is not None:
if verbosity.level > 0:
verbosity.cprint(f"Using backend from type: {f}")
implicit_backend = f.current_backend_str()
return f
return importlib.import_module(_backend_dict[implicit_backend])
If a global backend framework has been previously set using for example :code:`ivy.set_backend('tensorflow')`, then this globally set backend is returned.
Otherwise, the input arguments are type-checked to infer the backend, and this is returned from the function as a callable module with all bound functions adhering to the specific backend.
The functions in this returned module are populated by iterating through the global :attr:`ivy.__dict__` (or a non-global copy of :attr:`ivy.__dict__` if non-globally-set), and overwriting every function which is also directly implemented in the backend-specific namespace.
The following is a slightly simplified version of this code for illustration, which updates the global :attr:`ivy.__dict__` directly:
.. code-block:: python
# ivy/utils/backend/handler.py
def set_backend(backend: str):
# un-modified ivy.__dict__
global ivy_original_dict
if not backend_stack:
ivy_original_dict = ivy.__dict__.copy()
# add the input backend to the global stack
backend_stack.append(backend)
# iterate through original ivy.__dict__
for k, v in ivy_original_dict.items():
# if method doesn't exist in the backend
if k not in backend.__dict__:
# add the original ivy method to backend
backend.__dict__[k] = v
# update global ivy.__dict__ with this method
ivy.__dict__[k] = backend.__dict__[k]
# maybe log to the terminal
if verbosity.level > 0:
verbosity.cprint(
f'Backend stack: {backend_stack}'
)
The functions implemented by the backend-specific backend such as :code:`ivy.functional.backends.torch` only constitute a subset of the full Ivy API.
This is because many higher level functions are written as a composition of lower level Ivy functions.
These functions therefore do not need to be written independently for each backend framework.
A good example is :func:`ivy.lstm_update`, as shown:
.. code-block:: python
# ivy/functional/ivy/layers.py
@to_native_arrays_and_back
@handle_nestable
def lstm_update(
x: Union[ivy.Array, ivy.NativeArray],
init_h: Union[ivy.Array, ivy.NativeArray],
init_c: Union[ivy.Array, ivy.NativeArray],
kernel: Union[ivy.Array, ivy.NativeArray],
recurrent_kernel: Union[ivy.Array, ivy.NativeArray],
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
recurrent_bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Tuple[ivy.Array, ivy.Array]:
"""Perform long-short term memory update by unrolling time dimension of the input array.
Parameters
----------
x
input tensor of LSTM layer *[batch_shape, t, in]*.
init_h
initial state tensor for the cell output *[batch_shape, out]*.
init_c
initial state tensor for the cell hidden state *[batch_shape, out]*.
kernel
weights for cell kernel *[in, 4 x out]*.
recurrent_kernel
weights for cell recurrent kernel *[out, 4 x out]*.
bias
bias for cell kernel *[4 x out]*. (Default value = None)
recurrent_bias
bias for cell recurrent kernel *[4 x out]*. (Default value = None)
Returns
-------
ret
hidden state for all timesteps *[batch_shape,t,out]* and cell state for last
timestep *[batch_shape,out]*
"""
# get shapes
x_shape = list(x.shape)
batch_shape = x_shape[:-2]
timesteps = x_shape[-2]
input_channels = x_shape[-1]
x_flat = ivy.reshape(x, (-1, input_channels))
# input kernel
Wi = kernel
Wi_x = ivy.reshape(
ivy.matmul(x_flat, Wi) + (bias if bias is not None else 0),
batch_shape + [timesteps, -1],
)
Wii_x, Wif_x, Wig_x, Wio_x = ivy.split(Wi_x, 4, -1)
# recurrent kernel
Wh = recurrent_kernel
# lstm states
ht = init_h
ct = init_c
# lstm outputs
hts_list = []
# unrolled time dimension with lstm steps
for Wii_xt, Wif_xt, Wig_xt, Wio_xt in zip(
ivy.unstack(Wii_x, axis=-2),
ivy.unstack(Wif_x, axis=-2),
ivy.unstack(Wig_x, axis=-2),
ivy.unstack(Wio_x, axis=-2),
):
htm1 = ht
ctm1 = ct
Wh_htm1 = ivy.matmul(htm1, Wh) + (
recurrent_bias if recurrent_bias is not None else 0
)
Whi_htm1, Whf_htm1, Whg_htm1, Who_htm1 = ivy.split(
Wh_htm1, num_or_size_splits=4, axis=-1
)
it = ivy.sigmoid(Wii_xt + Whi_htm1)
ft = ivy.sigmoid(Wif_xt + Whf_htm1)
gt = ivy.tanh(Wig_xt + Whg_htm1)
ot = ivy.sigmoid(Wio_xt + Who_htm1)
ct = ft * ctm1 + it * gt
ht = ot * ivy.tanh(ct)
hts_list.append(ivy.expand_dims(ht, -2))
return ivy.concat(hts_list, -2), ct
We *could* find and wrap the functional LSTM update methods for each backend framework which might bring a small performance improvement, but in this case there are no functional LSTM methods exposed in the official functional APIs of the backend frameworks, and therefore the functional LSTM code which does exist for the backends is much less stable and less reliable for wrapping into Ivy.
Generally, we have made decisions so that Ivy is as stable and scalable as possible, minimizing dependencies to backend framework code where possible with minimal sacrifices in performance.
Tracer 🚧
-----------------
“What about performance?” I hear you ask.
This is a great point to raise!
With the design as currently presented, there would be a small performance hit every time we call an Ivy function by virtue of the added Python wrapping.
One reason we created the tracer was to address this issue.
The tracer takes in any Ivy function, backend function, or composition, and returns the computation graph using the backend functional API only.
The dependency graph for this process looks like this:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/compiler_dependency_graph.png?raw=true
:align: center
:width: 75%
Let's look at a few examples, and observe the traced graph of the Ivy code against the native backend code.
First, let's set our desired backend as PyTorch.
When we trace the three functions below, despite the fact that each
has a different mix of Ivy and PyTorch code, they all trace to the same graph:
+----------------------------------------+-----------------------------------------+-----------------------------------------+
|.. code-block:: python |.. code-block:: python |.. code-block:: python |
| | | |
| def pure_ivy(x): | def pure_torch(x): | def mix(x): |
| y = ivy.mean(x) | y = torch.mean(x) | y = ivy.mean(x) |
| z = ivy.sum(x) | z = torch.sum(x) | z = torch.sum(x) |
| f = ivy.var(y) | f = torch.var(y) | f = ivy.var(y) |
| k = ivy.cos(z) | k = torch.cos(z) | k = torch.cos(z) |
| m = ivy.sin(f) | m = torch.sin(f) | m = ivy.sin(f) |
| o = ivy.tan(y) | o = torch.tan(y) | o = torch.tan(y) |
| return ivy.concatenate( | return torch.cat( | return ivy.concatenate( |
| [k, m, o], -1) | [k, m, o], -1) | [k, m, o], -1) |
| | | |
| # input | # input | # input |
| x = ivy.array([[1., 2., 3.]]) | x = torch.tensor([[1., 2., 3.]]) | x = ivy.array([[1., 2., 3.]]) |
| | | |
| # create graph | # create graph | # create graph |
| graph = ivy.trace_graph( | graph = ivy.trace_graph( | graph = ivy.trace_graph( |
| pure_ivy, x) | pure_torch, x) | mix, x) |
| | | |
| # call graph | # call graph | # call graph |
| ret = graph(x) | ret = graph(x) | ret = graph(x) |
+----------------------------------------+-----------------------------------------+-----------------------------------------+
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/compiled_graph_a.png?raw=true
:align: center
:width: 75%
For all existing ML frameworks, the functional API is the backbone that underpins all higher level functions and classes.
This means that under the hood, any code can be expressed as a composition of ops in the functional API.
The same is true for Ivy.
Therefore, when compiling the graph with Ivy, any higher-level classes or extra code which does not directly contribute towards the computation graph is excluded.
For example, the following 3 pieces of code all result in the exact same computation graph when traced as shown:
+----------------------------------------+-----------------------------------------+-----------------------------------------+
|.. code-block:: python |.. code-block:: python |.. code-block:: python |
| | | |
| class Network(ivy.module) | def clean(x, w, b): | def unclean(x, w, b): |
| | return w*x + b | y = b + w + x |
| def __init__(self): | | print('message') |
| self._layer = ivy.Linear(3, 3) | | wx = w * x |
| super().__init__() | | ret = wx + b |
| | | temp = y * wx |
| def _forward(self, x): | | return ret |
| return self._layer(x) | | |
| | # input | # input |
| # build network | x = ivy.array([1., 2., 3.]) | x = ivy.array([1., 2., 3.]) |
| net = Network() | w = ivy.random_uniform( | w = ivy.random_uniform( |
| | -1, 1, (3, 3)) | -1, 1, (3, 3)) |
| # input | b = ivy.zeros((3,)) | b = ivy.zeros((3,)) |
| x = ivy.array([1., 2., 3.]) | | |
| | # trace graph | # trace graph |
| # trace graph | graph = ivy.trace_graph( | graph = ivy.trace_graph( |
| net.trace_graph(x) | clean, x, w, b) | unclean, x, w, b) |
| | | |
| # execute graph | # execute graph | # execute graph |
| net(x) | graph(x, w, b) | graph(x, w, b) |
+----------------------------------------+-----------------------------------------+-----------------------------------------+
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/compiled_graph_b.png?raw=true
:align: center
:width: 75%
This tracing is not restricted to just PyTorch.
Let's take another example, but trace to Tensorflow, NumPy, and JAX:
+------------------------------------+
|.. code-block:: python |
| |
| def ivy_func(x, y): |
| w = ivy.diag(x) |
| z = ivy.matmul(w, y) |
| return z |
| |
| # input |
| x = ivy.array([[1., 2., 3.]]) |
| y = ivy.array([[2., 3., 4.]]) |
| # create graph |
| graph = ivy.trace_graph( |
| ivy_func, x, y) |
| |
| # call graph |
| ret = graph(x, y) |
+------------------------------------+
Converting this code to a graph, we get a slightly different graph for each backend:
Tensorflow:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/compiled_graph_tf.png?raw=true
:align: center
:width: 75%
|
Numpy:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/compiled_graph_numpy.png?raw=true
:align: center
:width: 75%
|
Jax:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/compiled_graph_jax.png?raw=true
:align: center
:width: 75%
|
The example above further emphasizes that the tracer creates a computation graph consisting of backend functions, not Ivy functions.
Specifically, the same Ivy code is traced to different graphs depending on the selected backend.
However, when compiling native framework code, we are only able to trace a graph for that same framework.
For example, we cannot take torch code and trace this into tensorflow code.
However, we can transpile torch code into tensorflow code (see `Ivy as a Transpiler <ivy_as_a_transpiler.rst>`_ for more details).
The tracer is not a compiler and does not compile to C++, CUDA, or any other lower level language.
It simply traces the backend functional methods in the graph, stores this graph, and then efficiently traverses this graph at execution time, all in Python.
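The core idea can be sketched in a few lines of plain Python. This is not Ivy's actual implementation, just a toy illustration of replaying a recorded list of backend functions:
.. code-block:: python
    import torch
    # each record is (backend_fn, ids of earlier results it consumes); id 0 is the input
    graph = [
        (torch.mean, [0]),  # node 1
        (torch.sum, [0]),   # node 2
        (torch.cos, [2]),   # node 3: cos of node 2's output
    ]
    def run(graph, x):
        results = {0: x}
        for i, (fn, arg_ids) in enumerate(graph, start=1):
            results[i] = fn(*[results[j] for j in arg_ids])
        return results[len(graph)]
    ret = run(graph, torch.tensor([1.0, 2.0, 3.0]))  # cos(6.0) ~= 0.96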
Compiling to lower level languages (C++, CUDA, TorchScript etc.) is supported for most backend frameworks via :func:`ivy.compile`, which wraps backend-specific compilation code, for example:
.. code-block:: python
# ivy/functional/backends/tensorflow/compilation.py
compile = lambda fn, dynamic=True, example_inputs=None,\
static_argnums=None, static_argnames=None:\
tf.function(fn)
.. code-block:: python
# ivy/functional/backends/torch/compilation.py
def compile(fn, dynamic=True, example_inputs=None,
static_argnums=None, static_argnames=None):
if dynamic:
return torch.jit.script(fn)
return torch.jit.trace(fn, example_inputs)
.. code-block:: python
# ivy/functional/backends/jax/compilation.py
compile = lambda fn, dynamic=True, example_inputs=None,\
static_argnums=None, static_argnames=None:\
jax.jit(fn, static_argnums=static_argnums,
static_argnames=static_argnames)
Therefore, the backend code can always be run with maximal efficiency by compiling into an efficient low-level backend-specific computation graph.
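As a usage sketch, assuming :func:`ivy.compile` forwards these arguments to the backend-specific implementations shown above:
.. code-block:: python
    import ivy
    import torch
    ivy.set_backend("torch")
    def fn(x):
        return torch.sum(torch.cos(x))
    # dynamic=False selects torch.jit.trace in the torch implementation above
    compiled_fn = ivy.compile(fn, dynamic=False, example_inputs=(torch.ones(3),))
    ret = compiled_fn(torch.tensor([1.0, 2.0, 3.0]))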
**Round Up**
Hopefully, this has painted a clear picture of the fundamental building blocks underpinning the Ivy framework, being the Backend functional APIs, Ivy functional API, Backend handler, and Tracer 😄
Please reach out on `discord <https://discord.gg/sXyFF8tDtm>`_ if you have any questions!
| ivy/docs/overview/design/building_blocks.rst/0 | {
"file_path": "ivy/docs/overview/design/building_blocks.rst",
"repo_id": "ivy",
"token_count": 12955
} | 5 |
``ivy.transpile()``
===================
..
⚠️ **Warning**: The tracer and the transpiler are not publicly available yet, so certain parts of this doc won't work as expected as of now!
Ivy's Transpiler converts a function written in any framework into your framework of
choice, preserving all the logic between frameworks.
As the output of transpilation is native code in the target framework, it
can be used as if it was originally developed in that framework,
allowing you to apply and use framework-specific optimizations or tools.
This makes all ML-related projects available to you, independently of the framework you
want to use to research, develop, or deploy systems. So if you want to:
- Use functions and building blocks like neural networks, layers, activations, and
training pipelines published in other frameworks. Ex: Using Haiku modules in PyTorch to
get access to the latest model.
- Integrate code developed in other frameworks into your code. Ex: Use the Kornia
library in Jax for extra performance.
- Take advantage of specific features in other frameworks. Ex: Convert Jax code to Tensorflow for deployment.
Ivy's Transpiler is definitely the tool for the job 🔧
To convert the code, it traces a computational graph using the Tracer and
leverages Ivy's frontends and backends to link one framework to another. After swapping
each function node in the computational graph with their equivalent Ivy frontend
functions, the tracer removes all the wrapping in the frontends and replaces them with the native
functions of the target framework.
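As a toy illustration of the node-swapping step (not the actual mechanism — the mapping here is hypothetical), the traced functions can be thought of as keys into a lookup table of target-framework equivalents:
.. code-block:: python
    import numpy as np
    import tensorflow as tf
    # hypothetical mapping from source-framework ops to target-framework ops
    op_map = {np.mean: tf.reduce_mean, np.sum: tf.reduce_sum}
    traced_nodes = [np.mean, np.sum]  # functions recorded by the tracer
    target_nodes = [op_map[f] for f in traced_nodes]  # swapped to tensorflow ops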
Transpiler API
--------------
.. py:function:: ivy.transpile(*objs, source = None, to = None, debug_mode = False, args = None, kwargs = None, params_v = None)
Transpiles a ``Callable`` or set of them from a ``source`` framework to another framework. If ``args`` or ``kwargs`` are specified,
transpilation is performed eagerly, otherwise, transpilation will happen lazily.
:param objs: Native callable(s) to transpile.
:type objs: ``Callable``
:param source: The framework that ``obj`` is from. This must be provided unless ``obj`` is a framework-specific module.
:type source: ``Optional[str]``
:param to: The target framework to transpile ``obj`` to.
:type to: ``Optional[str]``
:param debug_mode: Whether to transpile to ivy first, before the final compilation to
the target framework. If the target is ivy, then this flag makes no
difference.
:type debug_mode: ``bool``
:param args: If specified, arguments that will be used to transpile eagerly.
:type args: ``Optional[Tuple]``
:param kwargs: If specified, keyword arguments that will be used to transpile eagerly.
:type kwargs: ``Optional[dict]``
:param params_v: Parameters of a haiku model, as when transpiling these, the parameters
need to be passed explicitly to the function call.
:rtype: ``Union[Graph, LazyGraph, ModuleType, ivy.Module, torch.nn.Module, tf.keras.Model, hk.Module]``
:return: A transpiled ``Graph`` or a non-initialized ``LazyGraph``. If the object is a native trainable module, the corresponding module in the target framework will be returned. If the object is a ``ModuleType``, the function will return a copy of the module with every method lazily transpiled.
Using the transpiler
--------------------
Similar to the ``ivy.trace`` function, ``ivy.unify`` and ``ivy.transpile`` can be used
eagerly and lazily. If you pass the necessary arguments, the function will be called
instantly, otherwise, transpilation will happen the first time you invoke the function
with the proper arguments.
In both cases, arguments or keyword arguments can be arrays from
either the ``source`` framework or the target (``to``) framework.
Transpiling functions
~~~~~~~~~~~~~~~~~~~~~
First, let's start transpiling some simple functions. In the snippet below, we transpile
a small JAX function to Torch both eagerly and lazily.
.. code-block:: python
    import ivy
    import jax
    ivy.set_backend("jax")
# Simple JAX function to transpile
def test_fn(x):
return jax.numpy.sum(x)
x1 = ivy.array([1., 2.])
# Arguments are available -> transpilation happens eagerly
eager_graph = ivy.transpile(test_fn, source="jax", to="torch", args=(x1,))
# eager_graph is now torch code and runs efficiently
ret = eager_graph(x1)
# Arguments are not available -> transpilation happens lazily
lazy_graph = ivy.transpile(test_fn, source="jax", to="torch")
# The transpiled graph is initialized, transpilation will happen here
ret = lazy_graph(x1)
# lazy_graph is now torch code and runs efficiently
ret = lazy_graph(x1)
Transpiling Libraries
~~~~~~~~~~~~~~~~~~~~~
Likewise, you can use ``ivy.transpile`` to convert entire libraries and modules with just one line of
code!
.. code-block:: python
import ivy
import kornia
import requests
import jax.numpy as jnp
from PIL import Image
# transpile kornia from torch to jax
jax_kornia = ivy.transpile(kornia, source="torch", to="jax")
# get an image
url = "http://images.cocodataset.org/train2017/000000000034.jpg"
raw_img = Image.open(requests.get(url, stream=True).raw)
# convert it to the format expected by kornia
img = jnp.transpose(jnp.array(raw_img), (2, 0, 1))
img = jnp.expand_dims(img, 0) / 255
# and use the transpiled version of any function from the library!
out = jax_kornia.enhance.sharpness(img, 5)
Transpiling Modules
~~~~~~~~~~~~~~~~~~~
Last but not least, Ivy can also transpile trainable modules from one framework to
another, at the moment we support ``torch.nn.Module`` when ``to="torch"``,
``tf.keras.Model`` when ``to="tensorflow"``, and haiku models when ``to="jax"``.
.. code-block:: python
import ivy
import timm
import torch
import jax
import haiku as hk
# Get a pretrained pytorch model
mlp_encoder = timm.create_model("mixer_b16_224", pretrained=True, num_classes=0)
# Transpile it into a hk.Module with the corresponding parameters
noise = torch.randn(1, 3, 224, 224)
mlp_encoder = ivy.transpile(mlp_encoder, to="jax", args=(noise,))
# Build a classifier using the transpiled encoder
class Classifier(hk.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.encoder = mlp_encoder()
self.fc = hk.Linear(output_size=num_classes, with_bias=True)
def __call__(self, x):
x = self.encoder(x)
x = self.fc(x)
return x
def _forward_classifier(x):
module = Classifier()
return module(x)
# Transform the classifier and use it as a standard hk.Module
rng_key = jax.random.PRNGKey(42)
x = jax.random.uniform(key=rng_key, shape=(1, 3, 224, 224), dtype=jax.numpy.float32)
forward_classifier = hk.transform(_forward_classifier)
params = forward_classifier.init(rng=rng_key, x=x)
ret = forward_classifier.apply(params, None, x)
Sharp bits
----------
In a similar fashion to the tracer, the transpiler is under development and we are
still working on some rough edges. These include:
1. **Keras model subclassing**: If a model is transpiled to keras, the resulting
``tf.keras.Model`` can not be used within a keras sequential model at the moment. If
you want to use the transpiled model as part of a more complex keras model, you can
`create a Model subclass
<https://www.tensorflow.org/guide/keras/custom_layers_and_models#the_model_class>`_.
Due to this, any training of a keras model should be done using a TensorFlow training
pipeline instead of the keras utils.
2. **Keras arguments**: Keras models require at least an argument to be passed, so if a
model from another framework that only takes ``kwargs`` is transpiled to keras,
you'll need to pass a ``None`` argument to the transpiled model before the
corresponding ``kwargs``.
3. **Haiku transform with state**: As of now, we only support the transpilation of
transformed Haiku modules, this means that ``transformed_with_state`` objects will
not be correctly transpiled.
4. **Array format between frameworks**: As the tracer outputs a 1-to-1 mapping of the
traced function, the format of the tensors is preserved when transpiling from a
framework to another. As an example, if you transpile a convolutional block from
PyTorch (which uses ``N, C, H, W``) to TensorFlow (which uses ``N, H, W, C``) and want
to use it as part of a bigger (TensorFlow) model, you'll need to include a permute statement for
   the inference to be correct, as sketched below.
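A minimal sketch of this (the :code:`transpiled_block` here is a hypothetical stand-in, not an actual transpiler output):
.. code-block:: python
    import tensorflow as tf
    def transpiled_block(x):  # stand-in for a block transpiled from PyTorch
        return tf.reduce_mean(x, axis=(2, 3))  # consumes N, C, H, W input
    x_nhwc = tf.random.uniform((1, 224, 224, 3))      # TensorFlow-style layout
    x_nchw = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))  # permute to torch-style layout
    out = transpiled_block(x_nchw)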
Keep in mind that the transpiler uses the Tracer under the hood, so the
:ref:`sharp bits of the tracer <overview/one_liners/trace:Sharp bits>`
apply here as well!
Examples
--------
Here, we are transpiling a HF model from torch to tensorflow and then using the
resulting model with tensorflow tensors directly:
.. code-block:: python
import ivy
from transformers import AutoImageProcessor, ResNetForImageClassification
from datasets import load_dataset
# Set backend to torch
ivy.set_backend("torch")
# Download the input image
dataset = load_dataset("huggingface/cats-image")
image = dataset["test"]["image"][0]
# Setting the model
image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    inputs = image_processor(image, return_tensors="pt")
    # Transpiling the model to tensorflow
    tf_model = ivy.transpile(model, source="torch", to="tensorflow", kwargs=inputs)
# Using the transpiled model
tf_inputs = image_processor(image, return_tensors="tf")
ret = tf_model(None, **tf_inputs)
| ivy/docs/overview/one_liners/transpile.rst/0 | {
"file_path": "ivy/docs/overview/one_liners/transpile.rst",
"repo_id": "ivy",
"token_count": 2979
} | 6 |
#!/bin/bash -e
# For some reason torch needed to be installed sequentially before installing from
# requirements.txt
pip install torch || exit 1
pip install torch-scatter || exit 1
| ivy/docs/prebuild.sh/0 | {
"file_path": "ivy/docs/prebuild.sh",
"repo_id": "ivy",
"token_count": 48
} | 7 |
from .activations import _ArrayWithActivationsExperimental
from .conversions import _ArrayWithConversionsExperimental
from .creation import _ArrayWithCreationExperimental
from .data_type import _ArrayWithData_typeExperimental
from .device import _ArrayWithDeviceExperimental
from .elementwise import _ArrayWithElementWiseExperimental
from .general import _ArrayWithGeneralExperimental
from .gradients import _ArrayWithGradientsExperimental
from .image import _ArrayWithImageExperimental
from .layers import _ArrayWithLayersExperimental
from .linear_algebra import _ArrayWithLinearAlgebraExperimental
from .losses import _ArrayWithLossesExperimental
from .manipulation import _ArrayWithManipulationExperimental
from .norms import _ArrayWithNormsExperimental
from .random import _ArrayWithRandomExperimental
from .searching import _ArrayWithSearchingExperimental
from .set import _ArrayWithSetExperimental
from .sorting import _ArrayWithSortingExperimental
from .statistical import _ArrayWithStatisticalExperimental
from .utility import _ArrayWithUtilityExperimental
| ivy/ivy/data_classes/array/experimental/__init__.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/__init__.py",
"repo_id": "ivy",
"token_count": 259
} | 8 |
# global
import abc
from typing import Optional, Tuple
# local
import ivy
class _ArrayWithSearchingExperimental(abc.ABC):
def unravel_index(
self: ivy.Array,
shape: Tuple[int],
/,
*,
out: Optional[ivy.Array] = None,
) -> Tuple[ivy.Array]:
"""ivy.Array instance method variant of ivy.unravel_index. This method
simply wraps the function, and so the docstring for ivy.unravel_index
also applies to this method with minimal changes.
Parameters
----------
self
Input array.
shape
The shape of the array to use for unraveling indices.
out
optional output array, for writing the result to.
Returns
-------
ret
Tuple with arrays that have the same shape as the indices array.
Examples
--------
>>> indices = ivy.array([22, 41, 37])
>>> indices.unravel_index((7,6))
(ivy.array([3, 6, 6]), ivy.array([4, 5, 1]))
"""
return ivy.unravel_index(self._data, shape, out=out)
| ivy/ivy/data_classes/array/experimental/searching.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/searching.py",
"repo_id": "ivy",
"token_count": 489
} | 9 |
# global
import abc
from typing import Optional, Union, Literal, List
# local
import ivy
class _ArrayWithSorting(abc.ABC):
def argsort(
self: ivy.Array,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.argsort. This method simply
wraps the function, and so the docstring for ivy.argsort also applies
to this method with minimal changes.
Parameters
----------
self
input array.
axis
axis along which to sort. If set to ``-1``, the function
must sort along the last axis. Default: ``-1``.
descending
sort order. If ``True``, the returned indices sort ``x`` in descending order
(by value). If ``False``, the returned indices sort ``x`` in ascending order
(by value). Default: ``False``.
stable
sort stability. If ``True``, the returned indices
must maintain the relative order of ``x`` values
which compare as equal. If ``False``, the returned
indices may or may not maintain the relative order
of ``x`` values which compare as equal (i.e., the
relative order of ``x`` values which compare as
equal is implementation-dependent). Default: ``True``.
out
optional output array, for writing the result to. It must have the same
shape as input.
Returns
-------
ret
an array of indices. The returned array must have the same shape as ``x``.
The returned array must have the default array index data type.
Examples
--------
>>> x = ivy.array([1, 5, 2])
>>> y = x.argsort(axis=-1, descending=True, stable=False)
>>> print(y)
ivy.array([1, 2, 0])
>>> x = ivy.array([9.6, 2.7, 5.2])
>>> y = x.argsort(axis=-1, descending=True, stable=False)
>>> print(y)
ivy.array([0, 2, 1])
"""
return ivy.argsort(
self._data, axis=axis, descending=descending, stable=stable, out=out
)
def sort(
self: ivy.Array,
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.sort. This method simply
wraps the function, and so the docstring for ivy.sort also applies to
this method with minimal changes.
Examples
--------
>>> x = ivy.array([7, 8, 6])
>>> y = x.sort(axis=-1, descending=True, stable=False)
>>> print(y)
ivy.array([8, 7, 6])
>>> x = ivy.array([8.5, 8.2, 7.6])
>>> y = x.sort(axis=-1, descending=True, stable=False)
>>> print(y)
ivy.array([8.5, 8.2, 7.6])
"""
return ivy.sort(
self._data, axis=axis, descending=descending, stable=stable, out=out
)
def msort(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.msort. This method simply
wraps the function, and so the docstring for ivy.msort also applies to
this method with minimal changes.
Parameters
----------
self
input array.
out
optional output array, for writing the result to.
Returns
-------
ret
            sorted array of the same type and shape as the input array
Examples
--------
>>> a = ivy.asarray([[8, 9, 6],[6, 2, 6]])
>>> a.msort()
ivy.array(
[[6, 2, 6],
[8, 9, 6]]
)
"""
return ivy.msort(self._data, out=out)
def searchsorted(
self: ivy.Array,
v: Union[ivy.Array, ivy.NativeArray],
/,
*,
side: Literal["left", "right"] = "left",
sorter: Optional[Union[ivy.Array, ivy.NativeArray, List[int]]] = None,
ret_dtype: Union[ivy.Dtype, ivy.NativeDtype] = ivy.int64,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.searchsorted.
This method simply wraps the function, and so the docstring for
ivy.searchsorted also applies to this method with minimal
changes.
"""
return ivy.searchsorted(
self.data, v, side=side, sorter=sorter, ret_dtype=ret_dtype, out=out
)
| ivy/ivy/data_classes/array/sorting.py/0 | {
"file_path": "ivy/ivy/data_classes/array/sorting.py",
"repo_id": "ivy",
"token_count": 2178
} | 10 |
# global
from typing import Optional, Union, List, Dict
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithCreationExperimental(ContainerBase):
@staticmethod
def static_hann_window(
window_length: Union[int, ivy.Container],
periodic: Union[bool, ivy.Container] = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.hann_window. This method
simply wraps the function, and so the docstring for ivy.hann_window
also applies to this method with minimal changes.
Parameters
----------
window_length
container including multiple window sizes.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
dtype
The data type to produce. Must be a floating point type.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that contains the Hann windows.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=3, b=5)
        >>> ivy.Container.static_hann_window(x)
{
            a: ivy.array([0.0000, 0.7500, 0.7500]),
b: ivy.array([0.0000, 0.3455, 0.9045, 0.9045, 0.3455])
}
"""
return ContainerBase.cont_multi_map_in_function(
"hann_window",
window_length,
periodic,
dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def hann_window(
self: ivy.Container,
periodic: Union[bool, ivy.Container] = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.hann_window. This
method simply wraps the function, and so the docstring for
ivy.hann_window also applies to this method with minimal changes.
Parameters
----------
self
input container with window sizes.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
dtype
The data type to produce. Must be a floating point type.
out
optional output container, for writing the result to.
Returns
-------
ret
The container containing the Hann windows.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=3, b=5)
        >>> x.hann_window()
{
            a: ivy.array([0.0000, 0.7500, 0.7500]),
b: ivy.array([0.0000, 0.3455, 0.9045, 0.9045, 0.3455])
}
"""
return self.static_hann_window(self, periodic, dtype, out=out)
@staticmethod
def static_kaiser_window(
window_length: Union[int, ivy.Container],
periodic: Union[bool, ivy.Container] = True,
beta: Union[float, ivy.Container] = 12.0,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.kaiser_window. This
method simply wraps the function, and so the docstring for
ivy.kaiser_window also applies to this method with minimal changes.
Parameters
----------
window_length
input container including window lengths.
periodic
If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta
a float used as shape parameter for the window.
dtype
data type of the returned array.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the Kaiser windows.
Examples
--------
>>> x = ivy.Container(a=3, b=5)
>>> ivy.Container.static_kaiser_window(x, True, 5)
{
a: ivy.array([0.2049, 0.8712, 0.8712]),
            b: ivy.array([0.0367, 0.7753, 0.7753]),
}
"""
return ContainerBase.cont_multi_map_in_function(
"kaiser_window",
window_length,
periodic,
beta,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
def kaiser_window(
self: ivy.Container,
periodic: Union[bool, ivy.Container] = True,
beta: Union[float, ivy.Container] = 12.0,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.kaiser_window. This
method simply wraps the function, and so the docstring for
ivy.kaiser_window also applies to this method with minimal changes.
Parameters
----------
self
input container including window lengths.
periodic
If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta
a float used as shape parameter for the window.
dtype
data type of the returned array.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the Kaiser windows.
Examples
--------
>>> x = ivy.Container(a=3, b=5)
        >>> x.kaiser_window(True, 5)
        {
            a: ivy.array([0.2049, 0.8712, 0.8712]),
            b: ivy.array([0.0367, 0.7753, 0.7753]),
        }
"""
return self.static_kaiser_window(
self,
periodic,
beta,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
@staticmethod
def static_kaiser_bessel_derived_window(
x: Union[int, ivy.Array, ivy.NativeArray, ivy.Container],
periodic: Union[bool, ivy.Container] = True,
beta: Union[float, ivy.Container] = 12.0,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of
ivy.kaiser_bessel_derived_window. This method simply wraps the
function, and so the docstring for ivy.kaiser_bessel_derived_window
also applies to this method with minimal changes.
Parameters
----------
x
input container including window lengths.
periodic
If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta
a float used as shape parameter for the window.
dtype
data type of the returned array.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the Kaiser Bessel Derived windows.
Examples
--------
>>> x = ivy.Container(a=3, b=5)
>>> ivy.Container.static_kaiser_bessel_derived_window(x, True, 5)
{
a: ivy.array([0.70710677, 0.70710677]),
b: ivy.array([0.18493208, 0.9827513 , 0.9827513 , 0.18493208]),
}
"""
return ContainerBase.cont_multi_map_in_function(
"kaiser_bessel_derived_window",
x,
periodic,
beta,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
def kaiser_bessel_derived_window(
self: ivy.Container,
periodic: Union[bool, ivy.Container] = True,
beta: Union[float, ivy.Container] = 12.0,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of
ivy.kaiser_bessel_derived_window. This method simply wraps the
function, and so the docstring for ivy.kaiser_bessel_derived_window
also applies to this method with minimal changes.
Parameters
----------
self
input container including window lengths.
periodic
If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta
a float used as shape parameter for the window.
dtype
data type of the returned array.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the Kaiser Bessel Derived windows.
Examples
--------
        >>> x = ivy.Container(a=3, b=5)
>>> x.kaiser_bessel_derived_window(True, 5)
{
a: ivy.array([0.70710677, 0.70710677]),
b: ivy.array([0.18493208, 0.9827513 , 0.9827513 , 0.18493208]),
}
"""
return self.static_kaiser_bessel_derived_window(
self,
periodic,
beta,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
@staticmethod
def static_hamming_window(
x: Union[int, ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
periodic: Union[bool, ivy.Container] = True,
alpha: Union[float, ivy.Container] = 0.54,
beta: Union[float, ivy.Container] = 0.46,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.hamming_window. This
method simply wraps the function, and so the docstring for
ivy.hamming_window also applies to this method with minimal changes.
Parameters
----------
x
input container including window lengths.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
alpha
The coefficient alpha in the hamming window equation
beta
The coefficient beta in the hamming window equation
dtype
data type of the returned arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the Hamming windows.
Examples
--------
>>> x = ivy.Container(a=3, b=5)
>>> ivy.Container.static_hamming_window(x, periodic=True, alpha=0.2, beta=2)
{
a: ivy.array([-1.8000, 1.2000, 1.2000]),
b: ivy.array([-1.8000, -0.4180, 1.8180, 1.8180, -0.4180])
}
"""
return ContainerBase.cont_multi_map_in_function(
"hamming_window",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
periodic=periodic,
alpha=alpha,
beta=beta,
dtype=dtype,
out=out,
)
def hamming_window(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
periodic: Union[bool, ivy.Container] = True,
alpha: Union[float, ivy.Container] = 0.54,
beta: Union[float, ivy.Container] = 0.46,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.hamming_window. This
method simply wraps the function, and so the docstring for
ivy.hamming_window also applies to this method with minimal changes.
Parameters
----------
self
input container including window lengths.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
alpha
The coefficient alpha in the hamming window equation
beta
The coefficient beta in the hamming window equation
dtype
data type of the returned arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the Hamming windows.
Examples
--------
        >>> x = ivy.Container(a=3, b=5)
>>> x.hamming_window(periodic=True, alpha=0.2, beta=2)
{
a: ivy.array([-1.8000, 1.2000, 1.2000]),
b: ivy.array([-1.8000, -0.4180, 1.8180, 1.8180, -0.4180])
}
"""
return self.static_hamming_window(
self, periodic=periodic, alpha=alpha, beta=beta, dtype=dtype, out=out
)
@staticmethod
def static_vorbis_window(
x: Union[int, ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.vorbis_window. This
method simply wraps the function, and so the docstring for
ivy.vorbis_window also applies to this method with minimal changes.
Parameters
----------
x
input container including window lengths.
dtype
data type of the returned arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the vorbis windows.
Examples
--------
>>> x = ivy.Container(a=3, b=5)
>>> ivy.Container.static_vorbis_window(x)
{
a: ivy.array([0., 0.38268343, 0.92387953, 1., 0.92387953,
0.38268343]),
b: ivy.array([0., 0.14943586, 0.51644717, 0.85631905, 0.98877142,
1., 0.98877142, 0.85631905, 0.51644717, 0.14943586])
}
"""
return ContainerBase.cont_multi_map_in_function(
"vorbis_window",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
def vorbis_window(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.vorbis_window. This
method simply wraps the function, and so the docstring for
ivy.vorbis_window also applies to this method with minimal changes.
Parameters
----------
self
input container including window lengths.
dtype
data type of the returned arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that includes the vorbis windows.
Examples
--------
        >>> x = ivy.Container(a=3, b=5)
>>> x.vorbis_window()
{
a: ivy.array([0., 0.38268343, 0.92387953, 1., 0.92387953,
0.38268343]),
b: ivy.array([0., 0.14943586, 0.51644717, 0.85631905, 0.98877142,
1., 0.98877142, 0.85631905, 0.51644717, 0.14943586])
}
"""
return self.static_vorbis_window(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
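    # NOTE: the tril_indices wrappers below are thin and undocumented in the
    # original source; they simply map ivy.tril_indices across each leaf of
    # the container, so see ivy.tril_indices for the parameter documentation.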
@staticmethod
def static_tril_indices(
n_rows: Union[int, ivy.Container],
n_cols: Optional[Union[int, ivy.Container]] = None,
k: Union[int, ivy.Container] = 0,
/,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
) -> ivy.Container:
        return ContainerBase.cont_multi_map_in_function(
"tril_indices",
n_rows,
n_cols,
k,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
device=device,
)
def tril_indices(
self: ivy.Container,
n_rows: Union[int, ivy.Container],
n_cols: Optional[Union[int, ivy.Container]] = None,
k: Union[int, ivy.Container] = 0,
/,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
) -> ivy.Container:
return self.static_tril_indices(
n_rows,
n_cols,
k,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
device=device,
)
@staticmethod
def static_eye_like(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
k: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
out: Optional[ivy.Container] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.eye_like. This method
simply wraps the function, and so the docstring for ivy.eye_like also
applies to this method with minimal changes.
Parameters
----------
x
input array or container from which to derive the output container shape.
k
index of the diagonal. A positive value refers to an upper diagonal,
a negative value to a lower diagonal, and 0 to the main diagonal.
Default: ``0``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
output array data type. If ``dtype`` is ``None``, the output container
data type must be inferred from ``self``. Default ``None``.
device
device on which to place the created array. If device is ``None``, the
output container device must be inferred from ``self``. Default: ``None``.
out
optional output container, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
a container having the same shape as ``x`` and filled with ``ones``
in diagonal ``k`` and ``zeros`` elsewhere.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]),
b=ivy.array([4.5, -5.3, -0, -2.3]))
>>> y = ivy.Container.static_eye_like(x)
>>> print(y)
{
a: ivy.array([[1.]]),
b: ivy.array([[1.]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"eye_like",
x,
k=k,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
dtype=dtype,
device=device,
)
def eye_like(
self: ivy.Container,
/,
k: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
out: Optional[ivy.Container] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.eye_like. This method
simply wraps the function, and so the docstring for ivy.eye_like also
applies to this method with minimal changes.
Parameters
----------
self
input array or container from which to derive the output container shape.
k
index of the diagonal. A positive value refers to an upper diagonal,
a negative value to a lower diagonal, and 0 to the main diagonal.
Default: ``0``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
output array data type. If ``dtype`` is ``None``, the output container
data type must be inferred from ``self``. Default: ``None``.
device
device on which to place the created array. If device is ``None``, the
output container device must be inferred from ``self``. Default: ``None``.
out
optional output container, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
a container having the same shape as ``x`` and filled with ``ones``
in diagonal ``k`` and ``zeros`` elsewhere.
Examples
--------
>>> x = ivy.Container(a=ivy.array([3., 8.]), b=ivy.array([2., 2.]))
>>> y = x.eye_like()
>>> print(y)
{
a: ivy.array([[1.],
[0.]]),
b: ivy.array([[1.],
[0.]])
}
"""
return self.static_eye_like(
self,
k,
key_chains,
to_apply,
prune_unapplied,
map_sequences,
out=out,
dtype=dtype,
device=device,
)
@staticmethod
def static_unsorted_segment_min(
data: ivy.Container,
segment_ids: ivy.Container,
num_segments: Union[int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
r"""ivy.Container instance method variant of ivy.unsorted_segment_min.
This method simply wraps the function, and so the docstring for
ivy.unsorted_segment_min also applies to this method with minimal
changes.
Note
----
If the given segment ID `i` is negative, then the corresponding
value is dropped, and will not be included in the result.
Parameters
----------
data
input array or container from which to gather the input.
segment_ids
Must be in the same size with the first dimension of `data`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `data`.
num_segments
An integer or array representing the total number of distinct segment IDs.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
A container, representing the result of a segmented min operation.
For each segment, it computes the min value in `data` where `segment_ids`
equals to segment ID.
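        Examples
        --------
        An illustrative example (hand-computed under the segmented-min
        semantics described above; exact repr formatting may differ):
        >>> data = ivy.Container(a=ivy.array([2., 4., 1., 3.]),
        ...                      b=ivy.array([5., 0., 7., 6.]))
        >>> segment_ids = ivy.array([0, 0, 1, 1])
        >>> ivy.Container.static_unsorted_segment_min(data, segment_ids, 2)
        {
            a: ivy.array([2., 1.]),
            b: ivy.array([0., 6.])
        }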
"""
return ContainerBase.cont_multi_map_in_function(
"unsorted_segment_min",
data,
segment_ids,
num_segments,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def unsorted_segment_min(
self: ivy.Container,
segment_ids: ivy.Container,
num_segments: Union[int, ivy.Container],
):
r"""ivy.Container instance method variant of ivy.unsorted_segment_min.
This method simply wraps the function, and so the docstring for
ivy.unsorted_segment_min also applies to this method with minimal
changes.
Note
----
If the given segment ID `i` is negative, then the corresponding
value is dropped, and will not be included in the result.
Parameters
----------
self
input array or container from which to gather the input.
segment_ids
Must be in the same size with the first dimension of `self`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `self`.
num_segments
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ret
A container, representing the result of a segmented min operation.
For each segment, it computes the min value in `self` where `segment_ids`
equals to segment ID.
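        Examples
        --------
        An illustrative example (hand-computed under the segmented-min
        semantics described above; exact repr formatting may differ):
        >>> data = ivy.Container(a=ivy.array([2., 4., 1., 3.]),
        ...                      b=ivy.array([5., 0., 7., 6.]))
        >>> segment_ids = ivy.array([0, 0, 1, 1])
        >>> data.unsorted_segment_min(segment_ids, 2)
        {
            a: ivy.array([2., 1.]),
            b: ivy.array([0., 6.])
        }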
"""
return self.static_unsorted_segment_min(
self,
segment_ids,
num_segments,
)
@staticmethod
def static_unsorted_segment_sum(
data: ivy.Container,
segment_ids: ivy.Container,
num_segments: Union[int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
r"""ivy.Container instance method variant of ivy.unsorted_segment_sum.
This method simply wraps the function, and so the docstring for
ivy.unsorted_segment_sum also applies to this method with minimal
changes.
Parameters
----------
data
input array or container from which to gather the input.
segment_ids
Must be in the same size with the first dimension of `data`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `data`.
num_segments
An integer or array representing the total number of distinct segment IDs.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
A container, representing the result of a segmented sum operation.
For each segment, it computes the sum of values in `data` where
`segment_ids` equals to segment ID.
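        Examples
        --------
        An illustrative example (hand-computed under the segmented-sum
        semantics described above; exact repr formatting may differ):
        >>> data = ivy.Container(a=ivy.array([2., 4., 1., 3.]),
        ...                      b=ivy.array([5., 0., 7., 6.]))
        >>> segment_ids = ivy.array([0, 0, 1, 1])
        >>> ivy.Container.static_unsorted_segment_sum(data, segment_ids, 2)
        {
            a: ivy.array([6., 4.]),
            b: ivy.array([5., 13.])
        }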
"""
return ContainerBase.cont_multi_map_in_function(
"unsorted_segment_sum",
data,
segment_ids,
num_segments,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def unsorted_segment_sum(
self: ivy.Container,
segment_ids: ivy.Container,
num_segments: Union[int, ivy.Container],
):
r"""ivy.Container instance method variant of ivy.unsorted_segment_sum.
This method simply wraps the function, and so the docstring for
ivy.unsorted_segment_sum also applies to this method with minimal
changes.
Parameters
----------
self
input array or container from which to gather the input.
segment_ids
Must be in the same size with the first dimension of `self`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `self`.
num_segments
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ret
A container, representing the result of a segmented sum operation.
For each segment, it computes the sum of values in `self` where
`segment_ids` equals to segment ID.
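        Examples
        --------
        An illustrative example (hand-computed under the segmented-sum
        semantics described above; exact repr formatting may differ):
        >>> data = ivy.Container(a=ivy.array([2., 4., 1., 3.]),
        ...                      b=ivy.array([5., 0., 7., 6.]))
        >>> segment_ids = ivy.array([0, 0, 1, 1])
        >>> data.unsorted_segment_sum(segment_ids, 2)
        {
            a: ivy.array([6., 4.]),
            b: ivy.array([5., 13.])
        }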
"""
return self.static_unsorted_segment_sum(
self,
segment_ids,
num_segments,
)
@staticmethod
def static_blackman_window(
window_length: Union[int, ivy.Container],
periodic: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
*,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.blackman_window. This
method simply wraps the function, and so the docstring for
ivy.blackman_window also applies to this method with minimal changes.
Parameters
----------
window_length
container including multiple window sizes.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
dtype
The data type to produce. Must be a floating point type.
out
optional output container, for writing the result to.
Returns
-------
ret
The container that contains the Blackman windows.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=3, b=5)
>>> ivy.Container.static_blackman_window(x)
{
            a: ivy.array([-1.38777878e-17, 6.30000000e-01, 6.30000000e-01]),
b: ivy.array([-1.38777878e-17, 2.00770143e-01, 8.49229857e-01,
8.49229857e-01, 2.00770143e-01])
}
"""
return ContainerBase.cont_multi_map_in_function(
"blackman_window",
window_length,
periodic,
dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def blackman_window(
self: ivy.Container,
periodic: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.blackman_window. This
method simply wraps the function, and so the docstring for
ivy.blackman_window also applies to this method with minimal changes.
Parameters
----------
self
input container with window sizes.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
dtype
The data type to produce. Must be a floating point type.
out
optional output container, for writing the result to.
Returns
-------
ret
The container containing the Blackman windows.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=3, b=5)
        >>> x.blackman_window()
        {
            a: ivy.array([-1.38777878e-17, 6.30000000e-01, 6.30000000e-01]),
b: ivy.array([-1.38777878e-17, 2.00770143e-01, 8.49229857e-01,
8.49229857e-01, 2.00770143e-01])
}
"""
return self.static_blackman_window(self, periodic, dtype, out=out)
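    # NOTE: _static_trilu and trilu are likewise thin, undocumented wrappers
    # that map ivy.trilu across each leaf of the container; see ivy.trilu
    # for the parameter documentation.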
@staticmethod
def _static_trilu(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
k: Union[int, ivy.Container] = 0,
upper: bool = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return ContainerBase.cont_multi_map_in_function(
"trilu",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
k=k,
upper=upper,
out=out,
)
def trilu(
self: ivy.Container,
/,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
*,
k: Union[int, ivy.Container] = 0,
upper: bool = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
return self._static_trilu(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
k=k,
upper=upper,
out=out,
)
@staticmethod
def static_mel_weight_matrix(
num_mel_bins: Union[int, ivy.Container],
dft_length: Union[int, ivy.Container],
sample_rate: Union[int, ivy.Container],
lower_edge_hertz: Optional[Union[float, ivy.Container]] = 0.0,
upper_edge_hertz: Optional[Union[float, ivy.Container]] = 3000.0,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
r"""ivy.Container instance method variant of ivy.mel_weight_matrix. This
method simply wraps the function, and so the docstring for
ivy.mel_weight_matrix also applies to this method with minimal changes.
Parameters
----------
num_mel_bins
The number of bands in the mel spectrum.
dft_length
The size of the original DFT obtained from (n_fft / 2 + 1).
sample_rate
Samples per second of the input signal.
lower_edge_hertz
Lower bound on the frequencies to be included in the mel spectrum.
upper_edge_hertz
The desired top edge of the highest frequency band.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
MelWeightMatrix of shape: [frames, num_mel_bins]
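        Examples
        --------
        A usage sketch (only the call pattern is shown, since the numeric
        weights depend on the backend's mel-scale implementation):
        >>> num_bins = ivy.Container(a=5, b=10)
        >>> m = ivy.Container.static_mel_weight_matrix(num_bins, 100, 8000)
        >>> # m is a container whose leaves are mel weight matrices with
        >>> # 5 and 10 mel bands respectively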
"""
return ContainerBase.cont_multi_map_in_function(
"mel_weight_matrix",
num_mel_bins,
dft_length,
sample_rate,
lower_edge_hertz,
upper_edge_hertz,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def mel_weight_matrix(
self: ivy.Container,
num_mel_bins: int,
dft_length: int,
sample_rate: int,
lower_edge_hertz: Optional[float] = 0.0,
upper_edge_hertz: Optional[float] = 3000.0,
):
r"""ivy.Container instance method variant of ivy.mel_weight_matrix. This
method simply wraps the function, and so the docstring for
ivy.mel_weight_matrix also applies to this method with minimal changes.
Parameters
----------
num_mel_bins
The number of bands in the mel spectrum.
dft_length
The size of the original DFT obtained from (n_fft / 2 + 1).
sample_rate
Samples per second of the input signal.
lower_edge_hertz
Lower bound on the frequencies to be included in the mel spectrum.
upper_edge_hertz
The desired top edge of the highest frequency band.
Returns
-------
ret
MelWeightMatrix of shape: [frames, num_mel_bins]
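        Examples
        --------
        A usage sketch (note that the wrapped call forwards ``num_mel_bins``
        rather than ``self`` to the static variant, so the container is passed
        explicitly; numeric weights depend on the backend):
        >>> num_bins = ivy.Container(a=5, b=10)
        >>> m = num_bins.mel_weight_matrix(num_bins, 100, 8000)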
"""
return self.static_mel_weight_matrix(
num_mel_bins,
dft_length,
sample_rate,
lower_edge_hertz,
upper_edge_hertz,
)
@staticmethod
def static_unsorted_segment_mean(
data: ivy.Container,
segment_ids: Union[ivy.Array, ivy.Container],
num_segments: Union[int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""Compute the mean of values in the input data based on segment
identifiers.
Parameters
----------
data : ivy.Container
Input array or container from which to gather the input.
segment_ids : ivy.Container
An array of integers indicating the segment identifier for each element in
'data'.
num_segments : Union[int, ivy.Container]
An integer or array representing the total number of distinct segment IDs.
key_chains : Optional[Union[List[str], Dict[str, str], ivy.Container]], optional
The key-chains to apply or not apply the method to. Default is None.
to_apply : Union[bool, ivy.Container], optional
If True, the method will be applied to key-chains, otherwise key-chains will
be skipped. Default is True.
prune_unapplied : Union[bool, ivy.Container], optional
Whether to prune key-chains for which the function was not applied.
Default is False.
map_sequences : Union[bool, ivy.Container], optional
Whether to also map method to sequences (lists, tuples). Default is False.
Returns
-------
ivy.Container
A container representing the result of a segmented mean operation.
For each segment, it computes the mean of values in 'data' where
'segment_ids' equals the segment ID.
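        Examples
        --------
        An illustrative example (hand-computed under the segmented-mean
        semantics described above; exact repr formatting may differ):
        >>> data = ivy.Container(a=ivy.array([0., 1., 2., 4.]),
        ...                      b=ivy.array([3., 4., 5., 6.]))
        >>> segment_ids = ivy.array([0, 0, 1, 1])
        >>> ivy.Container.static_unsorted_segment_mean(data, segment_ids, 2)
        {
            a: ivy.array([0.5, 3.]),
            b: ivy.array([3.5, 5.5])
        }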
"""
return ContainerBase.cont_multi_map_in_function(
"unsorted_segment_mean",
data,
segment_ids,
num_segments,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def unsorted_segment_mean(
self: ivy.Container,
segment_ids: Union[ivy.Array, ivy.Container],
num_segments: Union[int, ivy.Container],
) -> ivy.Container:
"""Compute the mean of values in the input array or container based on
segment identifiers.
Parameters
----------
self : ivy.Container
Input array or container from which to gather the input.
segment_ids : ivy.Container
An array of integers indicating the segment identifier for each element
in 'self'.
num_segments : Union[int, ivy.Container]
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ivy.Container
A container representing the result of a segmented mean operation.
For each segment, it computes the mean of values in 'self' where
'segment_ids' equals the segment ID.
        Examples
--------
>>> data = ivy.Container(a=ivy.array([0., 1., 2., 4.]),
... b=ivy.array([3., 4., 5., 6.]))
>>> segment_ids = ivy.array([0, 0, 1, 1])
>>> num_segments = 2
        >>> result = data.unsorted_segment_mean(segment_ids, num_segments)
>>> print(result)
{
a: ivy.array([0.5, 3.0]),
b: ivy.array([3.5, 5.5])
}
>>> data = ivy.Container(a=ivy.array([0., 1., 2., 4., 5., 6.]),
... b=ivy.array([3., 4., 5., 6., 7., 8.]))
>>> segment_ids = ivy.array([0, 0, 1, 1, 2, 2])
>>> num_segments = 3
        >>> result = data.unsorted_segment_mean(segment_ids, num_segments)
>>> print(result)
{
a: ivy.array([0.5, 3.0, 5.5]),
b: ivy.array([3.5, 5.5, 7.5])
}
"""
return self.static_unsorted_segment_mean(
self,
segment_ids,
num_segments,
)
@staticmethod
def static_polyval(
coeffs: ivy.Container,
x: Union[ivy.Container, int, float],
*,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
) -> ivy.Container:
r"""ivy.Container static method variant of ivy.polyval. This method
simply wraps the function, and so the docstring for ivy.polyval also
applies to this method with minimal changes.
Evaluate and return a polynomial at specific given values.
Parameters
----------
coeffs
Polynomial coefficients (including zero) from highest degree
to constant term.
x
The value of the indeterminate variable at which to evaluate the polynomial.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
Output container containing simplified result of substituting x in the
coefficients - final value of polynomial.
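        Examples
        --------
        An illustrative example (hand-computed from the definition above, with
        coefficients ordered from highest degree to the constant term; exact
        repr formatting may differ):
        >>> coeffs = ivy.Container(a=ivy.array([1., 2., 3.]),
        ...                        b=ivy.array([2., 0.]))
        >>> ivy.Container.static_polyval(coeffs, 2)
        {
            a: ivy.array(11.),
            b: ivy.array(4.)
        }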
"""
return ContainerBase.cont_multi_map_in_function(
"polyval",
coeffs,
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def polyval(
self: ivy.Container,
coeffs: ivy.Container,
x: ivy.Container,
) -> ivy.Container:
r"""ivy.Container instance method variant of ivy.polyval. This method
simply wraps the function, and so the docstring for ivy.polyval also
applies to this method with minimal changes.
Evaluate and return a polynomial at specific given values.
Parameters
----------
self
Arbitrary input container
coeffs
Polynomial coefficients (including zero) from highest degree to
constant term.
x
The value of the indeterminate variable at which to
evaluate the polynomial.
Returns
-------
ret
Output container containing simplified result of substituting x in the
coefficients - final value of polynomial.
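        Examples
        --------
        An illustrative example (hand-computed; note that the wrapped call
        evaluates ``coeffs``, which is passed explicitly, rather than
        ``self``):
        >>> coeffs = ivy.Container(a=ivy.array([1., 2., 3.]),
        ...                        b=ivy.array([2., 0.]))
        >>> coeffs.polyval(coeffs, 2)
        {
            a: ivy.array(11.),
            b: ivy.array(4.)
        }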
"""
return self.static_polyval(coeffs, x)
| ivy/ivy/data_classes/container/experimental/creation.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/creation.py",
"repo_id": "ivy",
"token_count": 23313
} | 11 |
# global
from typing import Optional, Union, List, Dict, Tuple, Sequence
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithStatisticalExperimental(ContainerBase):
@staticmethod
def static_histogram(
a: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
bins: Optional[
Union[int, ivy.Array, ivy.NativeArray, ivy.Container, str]
] = None,
axis: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
extend_lower_interval: Optional[Union[bool, ivy.Container]] = False,
extend_upper_interval: Optional[Union[bool, ivy.Container]] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
range: Optional[Tuple[Union[bool, ivy.Container]]] = None,
weights: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
density: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.<func_name>. This method
simply wraps the function, and so the docstring for ivy.histogram also
applies to this method with minimal changes.
Parameters
----------
a
input array.
bins
if ``bins`` is an int, it defines the number of equal-width bins in the
given range.
if ``bins`` is an array, it defines a monotonically increasing array of bin
edges, including the rightmost edge, allowing for non-uniform bin widths.
axis
dimension along which maximum values must be computed. By default, the
maximum value must be computed over the entire array. Default: ``None``.
extend_lower_interval
            if True, extend the lowest interval I_0 to (-inf, c_1].
        extend_upper_interval
            if True, extend the upper interval I_{K-1} to [c_{K-1}, +inf).
dtype
the output type.
range
the lower and upper range of the bins. The first element of the range must
be less than or equal to the second.
weights
each value in ``a`` only contributes its associated weight towards the bin
count (instead of 1). Must be of the same shape as a.
density
if True, the result is the value of the probability density function at the
bin, normalized such that the integral over the range of bins is 1.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
a tuple containing the values of the histogram and the bin edges.
        Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.array([0., 1., 2., 3., 4., 5.])
>>> dtype = ivy.int32
>>> z = ivy.Container.static_histogram(x, bins=y, dtype=dtype)
>>> print(z.a)
>>> print(z.b)
(ivy.array([1, 1, 1, 0, 0]), ivy.array([0., 1., 2., 3., 4., 5.]))
(ivy.array([0, 0, 0, 1, 2]), ivy.array([0., 1., 2., 3., 4., 5.]))
"""
return ContainerBase.cont_multi_map_in_function(
"histogram",
a,
bins=bins,
axis=axis,
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=dtype,
range=range,
weights=weights,
density=density,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def histogram(
self: ivy.Container,
/,
*,
bins: Optional[
Union[int, ivy.Array, ivy.NativeArray, ivy.Container, str]
] = None,
axis: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
extend_lower_interval: Optional[Union[bool, ivy.Container]] = False,
extend_upper_interval: Optional[Union[bool, ivy.Container]] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
range: Optional[Union[Tuple[float], ivy.Container]] = None,
weights: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
density: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.<func_name>. This
method simply wraps the function, and so the docstring for
ivy.histogram also applies to this method with minimal changes.
Parameters
----------
self
input array.
bins
if ``bins`` is an int, it defines the number of equal-width bins in the
given range.
if ``bins`` is an array, it defines a monotonically increasing array of bin
edges, including the rightmost edge, allowing for non-uniform bin widths.
axis
dimension along which maximum values must be computed. By default, the
maximum value must be computed over the entire array. Default: ``None``.
extend_lower_interval
            if True, extend the lowest interval I_0 to (-inf, c_1].
        extend_upper_interval
            if True, extend the upper interval I_{K-1} to [c_{K-1}, +inf).
dtype
the output type.
range
the lower and upper range of the bins. The first element of the range must
be less than or equal to the second.
weights
each value in ``a`` only contributes its associated weight towards the bin
count (instead of 1). Must be of the same shape as a.
density
if True, the result is the value of the probability density function at the
bin, normalized such that the integral over the range of bins is 1.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
a tuple containing the values of the histogram and the bin edges.
        Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.array([0., 1., 2., 3., 4., 5.])
>>> dtype = ivy.int32
>>> z = x.histogram(bins=y, dtype=dtype)
>>> print(z)
{
a: ivy.array([1, 1, 1, 0, 0]),
b: ivy.array([0, 0, 0, 1, 2])
}
"""
return self.static_histogram(
self,
bins=bins,
axis=axis,
extend_lower_interval=extend_lower_interval,
extend_upper_interval=extend_upper_interval,
dtype=dtype,
range=range,
weights=weights,
density=density,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_median(
input: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.median. This method
simply wraps the function, and so the docstring for ivy.median also
applies to this method with minimal changes.
Parameters
----------
input
Input container including arrays.
axis
Axis or axes along which the medians are computed. The default is to compute
the median along a flattened version of the array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one.
out
optional output array, for writing the result to.
Returns
-------
ret
The median of the array elements.
Examples
--------
With one :class:`ivy.Container` input:
        >>> x = ivy.Container(a=ivy.array([[10, 7, 4], [3, 2, 1]]),
        ...                   b=ivy.array([[1, 4, 2], [8, 7, 0]]))
        >>> ivy.Container.static_median(x)
        {
            a: ivy.array(3.5),
            b: ivy.array(3.)
        }
"""
return ContainerBase.cont_multi_map_in_function(
"median",
input,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def median(
self: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.median. This method
simply wraps the function, and so the docstring for ivy.median also
applies to this method with minimal changes.
Parameters
----------
self
Input container including arrays.
axis
Axis or axes along which the medians are computed. The default is to compute
the median along a flattened version of the array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one.
out
optional output array, for writing the result to.
Returns
-------
ret
The median of the array elements.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(
>>> a=ivy.array([[10, 7, 4], [3, 2, 1]]),
>>> b=ivy.array([[1, 4, 2], [8, 7, 0]])
>>> )
>>> x.median(axis=0)
{
a: ivy.array([6.5, 4.5, 2.5]),
b: ivy.array([4.5, 5.5, 1.])
}
"""
return self.static_median(self, axis=axis, keepdims=keepdims, out=out)
@staticmethod
def static_nanmean(
input: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.nanmean. This method
simply wraps the function, and so the docstring for ivy.nanmean also
applies to this method with minimal changes.
Parameters
----------
input
Input container including arrays.
axis
Axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a. If the value is anything but the default,
then keepdims will be passed through to the mean or sum methods of
sub-classes of ndarray. If the sub-classes methods does not implement
keepdims any exceptions will be raised.
dtype
The desired data type of returned tensor. Default is None.
out
optional output array, for writing the result to.
Returns
-------
ret
The nanmean of the array elements in the container.
Examples
--------
        >>> a = ivy.Container(x=ivy.array([[1, ivy.nan], [3, 4]]),\
                              y=ivy.array([[ivy.nan, 1, 2], [1, 2, 3]]))
        >>> ivy.Container.static_nanmean(a)
        {
            x: 2.6666666666666665,
            y: 1.8
        }
"""
return ContainerBase.cont_multi_map_in_function(
"nanmean",
input,
axis=axis,
keepdims=keepdims,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def nanmean(
self: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.nanmean. This method
simply wraps the function, and so the docstring for ivy.nanmean also
applies to this method with minimal changes.
Parameters
----------
self
Input container including arrays.
axis
Axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a. If the value is anything but the default,
then keepdims will be passed through to the mean or sum methods of
sub-classes of ndarray. If the sub-classes methods does not implement
keepdims any exceptions will be raised.
dtype
The desired data type of returned tensor. Default is None.
out
optional output array, for writing the result to.
Returns
-------
ret
The nanmean of the array elements in the input container.
Examples
--------
        >>> a = ivy.Container(x=ivy.array([[1, ivy.nan], [3, 4]]),\
                              y=ivy.array([[ivy.nan, 1, 2], [1, 2, 3]]))
        >>> a.nanmean()
        {
            x: 2.6666666666666665,
            y: 1.8
        }
"""
return self.static_nanmean(
self, axis=axis, keepdims=keepdims, dtype=dtype, out=out
)
@staticmethod
def _static_nanmin(
x: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str]]] = None,
to_apply: bool = True,
prune_unapplied: bool = False,
map_sequences: bool = False,
initial: Optional[Union[int, float, complex, ivy.Container]] = None,
where: Optional[Union[ivy.Array, ivy.Container]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.nanmin. This method
simply wraps the function, and so the docstring for ivy.nanmin also
applies to this method with minimal changes.
Parameters
----------
input
Input container including arrays.
axis
Axis or axes along which the minimum is computed.
The default is to compute the minimum of the flattened array.
out
optional output array, for writing the result to.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a.
initial
The maximum value of an output element
where
Elements to compare for the minimum
Returns
-------
ret
Return minimum of an array or minimum along an axis, ignoring any NaNs.
Examples
--------
        >>> a = ivy.Container(x=ivy.array([[1, 2], [3, ivy.nan]]),\
                              y=ivy.array([[ivy.nan, 1, 2], [1, 2, 3]]))
        >>> ivy.Container.static_nanmin(a)
        {
            x: 1.,
            y: 1.
        }
"""
return ContainerBase.cont_multi_map_in_function(
"nanmin",
x,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
initial=initial,
where=where,
)
def nanmin(
self: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Optional[Union[bool, ivy.Container]] = False,
out: Optional[ivy.Container] = None,
initial: Optional[Union[int, float, complex, ivy.Container]] = None,
where: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.nanmin. This method
simply wraps the function, and so the docstring for ivy.nanmin also
applies to this method with minimal changes.
Parameters
----------
self
Input container including arrays.
axis
Axis or axes along which the minimum is computed.
The default is to compute the minimum of the flattened array.
out
optional output array, for writing the result to.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a.
initial
The maximum value of an output element.
where
Elements to compare for the minimum.
Returns
-------
ret
Return minimum of an array or minimum along an axis, ignoring any NaNs
Examples
--------
        >>> a = ivy.Container(x=ivy.array([[1, 2], [3, ivy.nan]]),\
                              y=ivy.array([[ivy.nan, 1, 2], [1, 2, 3]]))
        >>> a.nanmin()
        {
            x: 1.,
            y: 1.
        }
"""
return self._static_nanmin(
self,
axis=axis,
keepdims=keepdims,
out=out,
initial=initial,
where=where,
)
@staticmethod
def static_nanprod(
input: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Optional[Union[bool, ivy.Container]] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
initial: Optional[Union[int, float, complex, ivy.Container]] = 1,
where: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.nanprod. This method
simply wraps the function, and so the docstring for ivy.nanprod also
applies to this method with minimal changes.
Parameters
----------
input
Input container including arrays.
axis
Axis or axes along which the product is computed.
The default is to compute the product of the flattened array.
dtype
The desired data type of returned array. Default is None.
out
optional output array, for writing the result to.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a.
initial
The starting value for this product.
where
Elements to include in the product
Returns
-------
ret
The product of array elements over a given axis treating
Not a Numbers (NaNs) as ones
Examples
--------
        >>> a = ivy.Container(x=ivy.array([[1, 2], [3, ivy.nan]]),\
                              y=ivy.array([[ivy.nan, 1, 2], [1, 2, 3]]))
        >>> ivy.Container.static_nanprod(a)
        {
            x: 6.0,
            y: 12.0
        }
"""
return ContainerBase.cont_multi_map_in_function(
"nanprod",
input,
axis=axis,
keepdims=keepdims,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
initial=initial,
where=where,
)
def nanprod(
self: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
keepdims: Optional[Union[bool, ivy.Container]] = False,
out: Optional[ivy.Container] = None,
initial: Optional[Union[int, float, complex, ivy.Container]] = None,
where: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.nanprod. This method
simply wraps the function, and so the docstring for ivy.nanprod also
applies to this method with minimal changes.
Parameters
----------
self
Input container including arrays.
axis
Axis or axes along which the product is computed.
The default is to compute the product of the flattened array.
dtype
The desired data type of returned array. Default is None.
out
optional output array, for writing the result to.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original a.
initial
The starting value for this product.
where
Elements to include in the product
Returns
-------
ret
The product of array elements over a given axis treating
Not a Numbers (NaNs) as ones
Examples
--------
        >>> a = ivy.Container(x=ivy.array([[1, 2], [3, ivy.nan]]),\
                              y=ivy.array([[ivy.nan, 1, 2], [1, 2, 3]]))
        >>> a.nanprod()
        {
            x: 6.0,
            y: 12.0
        }
"""
return self.static_nanprod(
self,
axis=axis,
keepdims=keepdims,
dtype=dtype,
out=out,
initial=initial,
where=where,
)
@staticmethod
def static_quantile(
a: Union[ivy.Container, ivy.Array, ivy.NativeArray],
q: Union[ivy.Array, float, ivy.Container],
/,
*,
axis: Optional[Union[Sequence[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
interpolation: Union[str, ivy.Container] = "linear",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.quantile. This method
simply wraps the function, and so the docstring for ivy.quantile also
applies to this method with minimal changes.
Parameters
----------
a
Input container including arrays.
q
Quantile or sequence of quantiles to compute, which must be
between 0 and 1 inclusive.
axis
Axis or axes along which the quantiles are computed. The default
is to compute the quantile(s) along a flattened version of the array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
interpolation
{'nearest', 'linear', 'lower', 'higher', 'midpoint'}. Default value:
'linear'.
This specifies the interpolation method to use when the desired quantile
lies between two data points i < j:
- linear: i + (j - i) * fraction, where fraction is the fractional part of
the index surrounded by i and j.
- lower: i.
- higher: j.
- nearest: i or j, whichever is nearest.
- midpoint: (i + j) / 2. linear and midpoint interpolation do not work with
integer dtypes.
out
optional output array, for writing the result to.
Returns
-------
ret
Container with (rank(q) + N - len(axis)) dimensional arrays of same dtype
as input arrays in the container, or, if axis is None, rank(q) arrays. The
first rank(q) dimensions index quantiles for different values of q.
Examples
--------
With one :class:`ivy.Container` input:
>>> a = ivy.Container(x=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
y=ivy.array([1., 2., 3., 4.]))
>>> q = 0.5
>>> b = ivy.Container.static_quantile(a, q)
>>> print(b)
{
x: 3.5,
y: 2.5
}
>>> a = ivy.Container(x=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
y=ivy.array([1., 2., 3., 4.]))
>>> q = ivy.array([0.5, 0.75])
>>> b = ivy.Container.static_quantile(a, q)
>>> print(b)
{
x: ivy.array([3.5, 6.25]),
y: ivy.array([2.5, 3.25])
}
>>> a = ivy.Container(x=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
y=ivy.array([1., 2., 3., 4.]))
>>> q = ivy.array([0.5, 0.75])
>>> b = ivy.Container.static_quantile(a, q, axis = 0)
>>> print(b)
{
x: ivy.array([[6.5, 4.5, 2.5],
[8.25, 5.75, 3.25]]),
y: ivy.array([2.5, 3.25])
}
>>> a = ivy.Container(x=ivy.array([[10., 7., 4.], [3., 2., 1.]]))
>>> b = ivy.Container.static_quantile(a, q, axis = 1, keepdims=True)
>>> print(b)
{
x: ivy.array([[[7.],
[2.]],
[[8.5],
[2.5]]])
}
>>> a = ivy.Container(x=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
y=ivy.array([1., 2., 3., 4.]))
>>> q = ivy.array([0.3, 0.7])
>>> b = ivy.Container.static_quantile(a, q, axis = 0, interpolation="lower")
>>> print(b)
{
x: ivy.array([[3., 2., 1.],
[3., 2., 1.]]),
y: ivy.array([1., 3.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"quantile",
a,
q,
axis=axis,
keepdims=keepdims,
interpolation=interpolation,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def quantile(
self: ivy.Container,
q: Union[ivy.Array, float, ivy.Container],
/,
*,
axis: Optional[Union[Sequence[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
interpolation: Union[str, ivy.Container] = "linear",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.quantile. This method
simply wraps the function, and so the docstring for ivy.quantile also
applies to this method with minimal changes.
Parameters
----------
a
Input container including arrays.
q
Quantile or sequence of quantiles to compute, which must be
between 0 and 1 inclusive.
axis
Axis or axes along which the quantiles are computed. The default
is to compute the quantile(s) along a flattened version of the array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
interpolation
{'nearest', 'linear', 'lower', 'higher', 'midpoint'}. Default value:
'linear'.
This specifies the interpolation method to use when the desired quantile
lies between two data points i < j:
- linear: i + (j - i) * fraction, where fraction is the fractional part of
the index surrounded by i and j.
- lower: i.
- higher: j.
- nearest: i or j, whichever is nearest.
- midpoint: (i + j) / 2. linear and midpoint interpolation do not work with
integer dtypes.
out
optional output array, for writing the result to.
Returns
-------
ret
Container with (rank(q) + N - len(axis)) dimensional arrays of same dtype
as input arrays in the container, or, if axis is None, rank(q) arrays. The
first rank(q) dimensions index quantiles for different values of q.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
... b=ivy.array([1., 2., 3., 4.]))
        >>> z = 0.5
>>> y = x.quantile(z)
>>> print(y)
{
a: ivy.array(3.5),
b: ivy.array(2.5)
}
>>> x = ivy.Container(a=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
... b=ivy.array([1., 2., 3., 4.]))
>>> z = ivy.array([0.5, 0.75])
>>> y = x.quantile(z)
>>> print(y)
{
a: ivy.array([3.5, 6.25]),
b: ivy.array([2.5, 3.25])
}
>>> x = ivy.Container(a=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
... b=ivy.array([1., 2., 3., 4.]))
>>> z = ivy.array([0.5, 0.75])
>>> y = x.quantile(z, axis = 0)
>>> print(y)
{
a: ivy.array([[6.5, 4.5, 2.5],
[8.25, 5.75, 3.25]]),
b: ivy.array([2.5, 3.25])
}
>>> x = ivy.Container(a=ivy.array([[10., 7., 4.], [3., 2., 1.]]))
>>> z = ivy.array([0.5, 0.75])
>>> y = x.quantile(z, axis = 1, keepdims=True)
>>> print(y)
{
a: ivy.array([[[7.],
[2.]],
[[8.5],
[2.5]]])
}
>>> x = ivy.Container(a=ivy.array([[10., 7., 4.], [3., 2., 1.]]),
... b=ivy.array([1., 2., 3., 4.]))
>>> z = ivy.array([0.3, 0.7])
>>> y = x.quantile(z, axis = 0, interpolation="lower")
>>> print(y)
{
a: ivy.array([[3., 2., 1.],
[3., 2., 1.]]),
b: ivy.array([1., 3.])
}
"""
return self.static_quantile(
self,
q,
axis=axis,
keepdims=keepdims,
interpolation=interpolation,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_corrcoef(
x: ivy.Container,
/,
*,
y: Optional[ivy.Container] = None,
rowvar: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = False,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.corrcoef. This method
simply wraps the function, and so the docstring for ivy.corrcoef also
applies to this method with minimal changes.
Parameters
----------
x
Input container including arrays.
y
An additional input container.
rowvar
If rowvar is True (default), then each row represents a variable, with
observations in the columns. Otherwise, the relationship is transposed:
each column represents a variable, while the rows contain observations.
Returns
-------
ret
The corrcoef of the array elements in the container.
Examples
--------
        >>> a = ivy.Container(w=ivy.array([[1., 2.], [3., 4.]]),
        ...                   z=ivy.array([[0., 1., 2.], [2., 1., 0.]]))
        >>> ivy.Container.static_corrcoef(a)
{
w: ivy.array([[1., 1.],
[1., 1.]]),
z: ivy.array([[1., -1.],
[-1., 1.]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"corrcoef",
x,
y=y,
rowvar=rowvar,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def corrcoef(
self: ivy.Container,
/,
*,
y: Optional[ivy.Container] = None,
rowvar: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.corrcoef. This method
simply wraps the function, and so the docstring for ivy.corrcoef also
applies to this method with minimal changes.
Parameters
----------
self
Input container including arrays.
y
An additional input container.
rowvar
If rowvar is True (default), then each row represents a variable, with
observations in the columns. Otherwise, the relationship is transposed:
each column represents a variable, while the rows contain observations.
Returns
-------
ret
The corrcoef of the array elements in the input container.
Examples
--------
        >>> a = ivy.Container(w=ivy.array([[1., 2.], [3., 4.]]),
        ...                   z=ivy.array([[0., 1., 2.], [2., 1., 0.]]))
        >>> a.corrcoef()
{
w: ivy.array([[1., 1.],
[1., 1.]]),
z: ivy.array([[1., -1.],
[-1., 1.]])
}
"""
return self.static_corrcoef(self, y=y, rowvar=rowvar, out=out)
@staticmethod
def static_nanmedian(
input: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
overwrite_input: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.median. This method
simply wraps the function, and so the docstring for ivy.median also
applies to this method with minimal changes.
Parameters
----------
input
Input container including arrays.
axis
Axis or axes along which the medians are computed. The default is to compute
the median along a flattened version of the array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one.
overwrite_input
If True, then allow use of memory of input array for calculations.
out
optional output array, for writing the result to.
Returns
-------
ret
The median of the array elements.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[10.0, ivy.nan, 4], [3, 2, 1]]))
>>> ivy.Container.static_nanmedian(x)
{
a: ivy.array(3.)
}
"""
return ContainerBase.cont_multi_map_in_function(
"nanmedian",
input,
axis=axis,
keepdims=keepdims,
overwrite_input=overwrite_input,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def nanmedian(
self: ivy.Container,
/,
*,
axis: Optional[Union[Tuple[int], int, ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
overwrite_input: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.nanmedian. This method
simply wraps the function, and so the docstring for ivy.nanmedian also
applies to this method with minimal changes.
Parameters
----------
self
Input array.
axis
The axis or axes along which the means are computed.
The default is to compute the mean of the flattened array.
keepdims
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast
            correctly against the original container. If the value is anything
            but the default, then keepdims will be passed through to the mean or
            sum methods of sub-classes of ndarray. If the sub-classes' methods
            do not implement keepdims, an exception will be raised.
overwrite_input
If True, then allow use of memory of input array a for calculations.
            The input array will be modified by the call to median. This will
            save memory when you do not need to preserve the contents of the
            input array. Treat the input as undefined afterwards, but it will
            probably be fully or partially sorted. Default is False. If
            overwrite_input is True and the input container does not already
            have leaves which are of the ndarray kind, an error will be raised.
out
optional output array, for writing the result to.
Returns
-------
ret
            A new array holding the result. If the input contains integers or
            floats smaller than float64, then the output data-type is float64.
            Otherwise, the output data-type is the same as that of the input.
Examples
--------
With :class:`ivy.Container` input and default backend set as `numpy`:
>>> x = ivy.Container(a=ivy.array([[10.0, ivy.nan, 4], [3, 2, 1]]),
        ...                   b=ivy.array([[12, 10, 34], [45, 23, ivy.nan]]))
>>> x.nanmedian()
{
a: ivy.array(3.),
b: ivy.array(23.)
}
>>> x.nanmedian(axis=0)
{
a: ivy.array([6.5, 2., 2.5]),
b: ivy.array([28.5, 16.5, 34.])
}
"""
return self.static_nanmedian(
self, axis=axis, keepdims=keepdims, overwrite_input=overwrite_input, out=out
)
@staticmethod
def static_bincount(
x: ivy.Container,
/,
*,
weights: Optional[ivy.Container] = None,
minlength: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.bincount. This method
simply wraps the function, and so the docstring for ivy.bincount also
applies to this method with minimal changes.
Parameters
----------
x
Input container including arrays.
weights
An optional input container including arrays.
minlength
A minimum number of bins for the output array.
Returns
-------
ret
The bincount of the array elements.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 1, 2, 2, 2, 3]),
        ...                   b=ivy.array([1, 1, 2, 2, 2, 3]))
>>> ivy.Container.static_bincount(x)
{
            a: ivy.array([0, 2, 3, 1]),
            b: ivy.array([0, 2, 3, 1])
}
"""
return ContainerBase.cont_multi_map_in_function(
"bincount",
x,
weights=weights,
minlength=minlength,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def bincount(
self: ivy.Container,
/,
*,
weights: Optional[ivy.Container] = None,
minlength: Union[int, ivy.Container] = 0,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Array instance method variant of ivy.bincount. This method
simply wraps the function, and so the docstring for ivy.bincount also
applies to this method with minimal changes.
Parameters
----------
self
Input array.
weights
An optional input array.
minlength
A minimum number of bins for the output array.
Returns
-------
ret
The bincount of the array elements.
Examples
--------
        >>> x = ivy.Container(a=ivy.array([1, 1, 2, 2, 2, 3]),
        ...                   b=ivy.array([1, 1, 2, 2, 2, 3]))
        >>> x.bincount()
        {
            a: ivy.array([0, 2, 3, 1]),
            b: ivy.array([0, 2, 3, 1])
        }
"""
return self.static_bincount(self, weights=weights, minlength=minlength, out=out)
@staticmethod
def static_igamma(
a: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.igamma. This method
simply wraps the function, and so the docstring for ivy.igamma also
applies to this method with minimal changes.
Parameters
----------
        a
            Input array or container including arrays.
x
An additional input array.
`x` has the same type as `a`.
out
optional output array, for writing the result to.
Returns
-------
ret
The lower incomplete gamma function of the array elements.
Examples
--------
        >>> a = ivy.Container(a=ivy.array([2.5]))
        >>> x = ivy.Container(a=ivy.array([1.7, 1.2]))
        >>> ivy.Container.static_igamma(a, x=x)
        {
            a: ivy.array([0.3614, 0.2085])
        }
"""
return ContainerBase.cont_multi_map_in_function(
"igamma",
a,
x=x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def igamma(
self: ivy.Container,
/,
*,
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.igamma. This method
simply wraps the function, and so the docstring for ivy.igamma also
applies to this method with minimal changes.
Parameters
----------
self
            Input container including arrays.
x
An additional input array.
`x` has the same type as `a`.
out
optional output array, for writing the result to.
Returns
-------
ret
The lower incomplete gamma function of the array elements.
Examples
--------
        >>> a = ivy.Container(a=ivy.array([2.5]))
        >>> x = ivy.Container(a=ivy.array([1.7, 1.2]))
        >>> a.igamma(x=x)
        {
            a: ivy.array([0.3614, 0.2085])
        }
"""
return self.static_igamma(self, x=x, out=out)
@staticmethod
def static_lgamma(
a: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
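        """ivy.Container static method variant of ivy.lgamma. This method
        simply wraps the function, and so the docstring for ivy.lgamma also
        applies to this method with minimal changes.
        Parameters
        ----------
        a
            Input array or container including arrays.
        out
            optional output container, for writing the result to.
        Returns
        -------
        ret
            Container with the natural log of the absolute value of the gamma
            function applied to each element.
        Examples
        --------
        A minimal illustrative example (output values are rounded):
        >>> x = ivy.Container(a=ivy.array([1., 2., 3.]))
        >>> y = ivy.Container.static_lgamma(x)
        >>> print(y)
        {
            a: ivy.array([0., 0., 0.69314718])
        }
        """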
return ContainerBase.cont_multi_map_in_function(
"lgamma",
a,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def lgamma(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
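        """ivy.Container instance method variant of ivy.lgamma. This method
        simply wraps the function, and so the docstring for ivy.lgamma also
        applies to this method with minimal changes.
        Parameters
        ----------
        self
            Input container including arrays.
        out
            optional output container, for writing the result to.
        Returns
        -------
        ret
            Container with the natural log of the absolute value of the gamma
            function applied to each element.
        Examples
        --------
        A minimal illustrative example (output values are rounded):
        >>> x = ivy.Container(a=ivy.array([1., 2., 3.]))
        >>> y = x.lgamma()
        >>> print(y)
        {
            a: ivy.array([0., 0., 0.69314718])
        }
        """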
return self.static_lgamma(self, out=out)
@staticmethod
def static_cov(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
        x2: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
/,
*,
rowVar: Union[bool, ivy.Container] = True,
bias: Union[bool, ivy.Container] = False,
        ddof: Optional[Union[int, ivy.Container]] = None,
        fweights: Optional[Union[ivy.Array, ivy.Container]] = None,
        aweights: Optional[Union[ivy.Array, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cov. This method simply
wraps the function, and so the docstring for ivy.cov also applies to
this method with minimal changes.
Parameters
----------
x1
a 1D or 2D input array, nativearray or container, with a numeric data type.
x2
optional second 1D or 2D input array, nativearray, or container, with a
numeric data type. Must have the same shape as x1.
rowVar
optional variable where each row of input is interpreted as a variable
(default = True). If set to False, each column is instead interpreted
as a variable.
bias
optional variable for normalizing input (default = False) by (N - 1) where
N is the number of given observations. If set to True, then normalization
is instead by N. Can be overridden by keyword ``ddof``.
ddof
optional variable to override ``bias`` (default = None). ddof=1 will return
the unbiased estimate, even with fweights and aweights given. ddof=0 will
return the simple average.
fweights
optional 1D array of integer frequency weights; the number of times each
observation vector should be repeated.
aweights
optional 1D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ddof=0 is specified, the array
of weights can be used to assign probabilities to observation vectors.
dtype
optional variable to set data-type of the result. By default, data-type
will have at least ``numpy.float64`` precision.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
a container containing the covariance matrix of an input matrix, or the
covariance matrix of two variables. The returned container must have a
floating-point data type determined by Type Promotion Rules and must be
a square matrix of shape (N, N), where N is the number of rows in the
input(s).
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.array([1., 2., 3.])
>>> y = ivy.Container(a=ivy.array([3. ,2. ,1.]), b=ivy.array([-1., -2., -3.]))
>>> z = ivy.Container.static_cov(x, y)
>>> print(z)
        {
            a: ivy.array([[1., -1.],
                          [-1., 1.]]),
            b: ivy.array([[1., -1.],
                          [-1., 1.]])
        }
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([1., 2., 3.]))
>>> y = ivy.Container(a=ivy.array([3., 2., 1.]), b=ivy.array([3., 2., 1.]))
>>> z = ivy.Container.static_cov(x, y)
>>> print(z)
        {
            a: ivy.array([[1., -1.],
                          [-1., 1.]]),
            b: ivy.array([[1., -1.],
                          [-1., 1.]])
        }
"""
return ContainerBase.cont_multi_map_in_function(
"cov",
x1,
x2,
rowVar=rowVar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def cov(
self: ivy.Container,
x2: ivy.Container = None,
/,
*,
rowVar: Union[bool, ivy.Container] = True,
bias: Union[bool, ivy.Container] = False,
ddof: Optional[Union[int, ivy.Container]] = None,
fweights: Optional[Union[ivy.Array, ivy.Container]] = None,
aweights: Optional[Union[ivy.Array, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cov. This method simply
wraps the function, and so the docstring for ivy.cov also applies to
this method with minimal changes.
Parameters
----------
self
a 1D or 2D input container, with a numeric data type.
x2
optional second 1D or 2D input array, nativearray, or container, with a
numeric data type. Must have the same shape as ``self``.
rowVar
optional variable where each row of input is interpreted as a variable
(default = True). If set to False, each column is instead interpreted
as a variable.
bias
optional variable for normalizing input (default = False) by (N - 1) where
N is the number of given observations. If set to True, then normalization
is instead by N. Can be overridden by keyword ``ddof``.
ddof
optional variable to override ``bias`` (default = None). ddof=1 will return
the unbiased estimate, even with fweights and aweights given. ddof=0 will
return the simple average.
fweights
optional 1D array of integer frequency weights; the number of times each
observation vector should be repeated.
aweights
optional 1D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ddof=0 is specified, the array
of weights can be used to assign probabilities to observation vectors.
dtype
optional variable to set data-type of the result. By default, data-type
will have at least ``numpy.float64`` precision.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
a container containing the covariance matrix of an input matrix, or the
covariance matrix of two variables. The returned container must have a
floating-point data type determined by Type Promotion Rules and must be
a square matrix of shape (N, N), where N is the number of variables in the
input(s).
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([1., 2., 3.]))
>>> y = ivy.Container(a=ivy.array([3., 2., 1.]), b=ivy.array([3., 2., 1.]))
>>> z = x.cov(y)
>>> print(z)
{
a: ivy.array([[1., -1.],
[-1., 1.]]),
b: ivy.array([[1., -1.],
[-1., 1.]])
}
"""
return self.static_cov(
self,
x2,
rowVar=rowVar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
dtype=dtype,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def cummax(
self: ivy.Container,
/,
*,
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cummax. This method
simply wraps the function, and so the docstring for ivy.cummax also
applies to this method with minimal changes.
Parameters
----------
self
Input container to cummax at leaves.
axis
Axis along which the cumulative product is computed. Default is ``0``.
        exclusive
            Whether to exclude the first element of the input array.
            Default is ``False``.
        reverse
            Whether to perform the cummax from last to first element in the
            selected axis. Default is ``False`` (from first to last element).
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
Data type of the returned array. Default is ``None``.
out
Optional output container. Default is ``None``.
Returns
-------
ret
Containers with arrays cummax at leaves along specified axis.
        Examples
        --------
        With one :class:`ivy.Container` instance:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = x.cummax(axis=0)
>>> print(y)
[{
a: ivy.array([1, 2, 3]),
b: ivy.array([4, 5, 6])
}, {
a: ivy.array([0, 1, 2]),
b: ivy.array([0, 1, 2])
}]
>>> x = ivy.Container(a=ivy.array([[2, 3], [5, 7], [11, 13]]),
... b=ivy.array([[3, 4], [4, 5], [5, 6]]))
>>> y = ivy.Container(a = ivy.zeros((3, 2)), b = ivy.zeros((3, 2)))
>>> x.cummax(axis=1, exclusive=True, out=y)
>>> print(y)
{
a: ivy.array([[0., 1.],
[0., 1.],
[0., 1.]]),
b: ivy.array([[0., 1.],
[0., 1.],
[0., 1.]])
}
"""
return self._static_cummax(
self,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
def cummin(
self: ivy.Container,
/,
*,
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cummin. This method
simply wraps the function, and so the docstring for ivy.cummin also
applies to this method with minimal changes.
Parameters
----------
self
Input container to cummin at leaves.
axis
Axis along which the cumulative product is computed. Default is ``0``.
        exclusive
            Whether to exclude the first element of the input array.
            Default is ``False``.
        reverse
            Whether to perform the cummin from last to first element in the
            selected axis. Default is ``False`` (from first to last element).
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
Data type of the returned array. Default is ``None``.
out
Optional output container. Default is ``None``.
Returns
-------
ret
Containers with arrays cummin at leaves along specified axis.
Examples #TODO: change examples and change doc string
--------
        With one :class:`ivy.Container` instance:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = x.cummin(axis=0)
>>> print(y)
{
a: ivy.array([1, 1, 1]),
b: ivy.array([4, 4, 4])
}
>>> x = ivy.Container(a=ivy.array([[2, 3], [5, 7], [11, 13]]),
        ...                   b=ivy.array([[3, 4], [4, 5], [5, 6]]))
>>> y = ivy.Container(a = ivy.zeros((3, 2)), b = ivy.zeros((3, 2)))
        >>> x.cummin(axis=1, out=y)
        >>> print(y)
{
a: ivy.array([[2, 2],
[5, 5],
[11, 11]]),
b: ivy.array([[3, 3],
[4, 4],
[5, 5]])
}
"""
return self._static_cummin(
self,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
@staticmethod
def _static_cummax(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cummax. This method
simply wraps the function, and so the docstring for ivy.cummax also
applies to this method with minimal changes.
Parameters
----------
x
Input array or container to cummax.
axis
Axis to cummax along. Default is ``0``.
exclusive
Whether to exclude the first element of the input array.
Default is ``False``.
reverse
Whether to perform the cummax from last to first element in the selected
axis. Default is ``False`` (from first to last element)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
        dtype
            Data type of the returned array. Default is ``None``.
        out
            Optional output container. Default is ``None``.
Returns
-------
ret
Containers with arrays cummax at leaves along specified axis.
        Examples
        --------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = ivy.Container.static_cummax(x, axis=0)
>>> print(y)
{
a: ivy.array([1, 2, 3]),
b: ivy.array([4, 5, 6])
}
>>> x = ivy.Container(a=ivy.array([[2, 3], [5, 7], [11, 13]]),
        ...                   b=ivy.array([[3, 4], [4, 5], [5, 6]]))
>>> y = ivy.Container(a = ivy.zeros((3, 2)), b = ivy.zeros((3, 2)))
>>> ivy.Container.static_cummax(x, axis=1, out=y)
>>> print(y)
{
a: ivy.array([[2., 3.],
[5., 7.],
[11., 13.]]),
b: ivy.array([[3., 4.],
[4., 5.],
[5., 6.]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cummax",
x,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
@staticmethod
def _static_cummin(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = 0,
exclusive: Union[bool, ivy.Container] = False,
reverse: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cummin. This method
simply wraps the function, and so the docstring for ivy.cummin also
applies to this method with minimal changes.
Parameters
----------
x
Input array or container to cummin.
axis
Axis to cummin along. Default is ``0``.
exclusive
Whether to exclude the first element of the input array.
Default is ``False``.
reverse
Whether to perform the cummin from last to first element in the selected
axis. Default is ``False`` (from first to last element)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
dtype
Data type of the returned array. Default is ``None``.
out
Optional output container. Default is ``None``.
Returns
-------
ret
Containers with arrays cummin at leaves along specified axis.
Examples #TODO: fix examples and this doc
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = ivy.Container.static_cummin(x, axis=0)
>>> print(y)
{
a: ivy.array([1, 1, 1]),
b: ivy.array([4, 4, 4])
}
>>> x = ivy.Container(a=ivy.array([[2, 3], [5, 7], [11, 13]]),
        ...                   b=ivy.array([[3, 4], [4, 5], [5, 6]]))
>>> y = ivy.Container(a = ivy.zeros((3, 2)), b = ivy.zeros((3, 2)))
        >>> ivy.Container._static_cummin(x, axis=1, out=y)
        >>> print(y)
{
a: ivy.array([[2, 2],
[5, 5],
[11, 11]]),
b: ivy.array([[3, 3],
[4, 4],
[5, 5]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cummin",
x,
axis=axis,
exclusive=exclusive,
reverse=reverse,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
dtype=dtype,
out=out,
)
| ivy/ivy/data_classes/container/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 34744
} | 12 |
# local
import ivy
# global
from typing import Callable, Type, List, Iterable, Optional, Union, Sequence, Dict
from types import ModuleType
TO_IGNORE = ["is_ivy_array", "is_native_array", "is_array", "shape"]
def _wrap_function(function_name: str, static: bool) -> Callable:
"""Wrap the function called `function_name`.
Parameters
----------
function_name
the name of the function e.g. "abs", "mean" etc.
static
whether the function being wrapped will be added as a static method.
Returns
-------
new_function
the wrapped function.
"""
def new_function(
*args,
key_chains: Optional[
Union[Sequence[str], Dict[str, str], ivy.Container]
] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
**kwargs
):
function = ivy.__dict__[function_name]
data_idx = function.array_spec[0]
if (
not (data_idx[0][0] == 0 and len(data_idx[0]) == 1)
and args
and ivy.is_ivy_container(args[0])
and not static
):
# if the method has been called as an instance method, and self should not
# be the first positional arg, then we need to re-arrange and place self
# in the correct location in the args or kwargs
self = args[0]
args = args[1:]
if len(args) > data_idx[0][0]:
args = ivy.copy_nest(args, to_mutable=True)
data_idx = [data_idx[0][0]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(args, data_idx, self)
else:
kwargs = ivy.copy_nest(kwargs, to_mutable=True)
data_idx = [data_idx[0][1]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(kwargs, data_idx, self)
# return function multi-mapped across the corresponding leaves of the containers
return ivy.ContainerBase.cont_multi_map_in_function(
function_name,
*args,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
**kwargs
)
return new_function
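# Illustrative (hypothetical usage, not part of the public API):
# _wrap_function("mean", static=False) returns a method which, when attached
# to ivy.Container and called as container.mean(...), multi-maps ivy.mean
# across the container's leaves via cont_multi_map_in_function.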
def add_ivy_container_instance_methods(
cls: Type[ivy.Container],
modules: Union[List[ModuleType], ivy.Container],
static: Union[bool, ivy.Container] = False,
to_ignore: Union[Iterable, ivy.Container] = (),
):
"""Loop over all ivy modules such as activations, general, etc. and add the
module functions to ivy container as instance methods using _wrap_function.
Parameters
----------
cls
the class we want to add the instance methods to.
modules
the modules to loop over: activations, general etc.
static
whether the function should be added as a static method.
to_ignore
any functions we don't want to add an instance method for.
Examples
--------
As shown, `add_ivy_container_instance_methods` adds all the appropriate functions
from the statistical module as instance methods to our toy `ContainerExample` class:
>>> from ivy.functional.ivy import statistical
>>> class ContainerExample:
... pass
>>> ivy.add_ivy_container_instance_methods(ContainerExample, [statistical])
>>> print(hasattr(ContainerExample, "mean"), hasattr(ContainerExample, "var"))
True True
"""
to_ignore = TO_IGNORE + list(to_ignore)
for module in modules:
for key, value in module.__dict__.items():
full_key = ("static_" if static else "") + key
# skip cases where the function is protected, or first letter is uppercase
# (i.e. is a class), or if the instance method already exists etc
if (
key.startswith("_")
or key[0].isupper()
or not callable(value)
or full_key in cls.__dict__
or hasattr(cls, full_key)
or full_key in to_ignore
or key not in ivy.__dict__
):
continue
try:
setattr(cls, full_key, _wrap_function(key, static))
except AttributeError:
pass
| ivy/ivy/data_classes/container/wrapping.py/0 | {
"file_path": "ivy/ivy/data_classes/container/wrapping.py",
"repo_id": "ivy",
"token_count": 2088
} | 13 |
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(dead_code)]
include!(concat!(env!("OUT_DIR"), "/c_xla.rs"));
| ivy/ivy/engines/XLA/rust_api/src/c_lib.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/src/c_lib.rs",
"repo_id": "ivy",
"token_count": 79
} | 14 |
import contextlib
import ivy
import functools
import logging
import weakref
import warnings
import copy as python_copy
from types import FunctionType
from typing import Callable, Literal
import inspect
import numpy as np
from ivy.utils.exceptions import IvyValueError
# for wrapping (sequence matters)
FN_DECORATORS = [
"handle_complex_input",
"handle_device",
"infer_dtype",
"handle_array_function",
"outputs_to_ivy_arrays",
"outputs_to_ivy_shapes",
"outputs_to_native_arrays",
"inputs_to_native_arrays",
"inputs_to_native_shapes",
"inputs_to_ivy_arrays",
"handle_out_argument",
"handle_view_indexing",
"handle_view",
"handle_array_like_without_promotion",
"handle_partial_mixed_function",
"handle_nestable",
"handle_ragged",
"handle_backend_invalid",
"temp_asarray_wrapper",
"handle_exceptions",
"handle_nans",
]
# Helpers #
# --------#
# for casting modes, order is the hierarchy
casting_modes_dict = {
"uint": lambda: ivy.valid_uint_dtypes,
"int": lambda: sorted(
set(ivy.valid_int_dtypes).difference(set(ivy.valid_uint_dtypes))
),
"float": lambda: ivy.valid_float_dtypes,
"complex": lambda: ivy.valid_complex_dtypes,
}
def caster(dtype, intersect):
if hasattr(dtype, "dtype"):
dtype = ivy.as_ivy_dtype(dtype.dtype)
else:
dtype = ivy.as_ivy_dtype(dtype)
if str(dtype) in intersect:
# based on upcasting or downcasting do something
if ivy.cast_dtypes():
# all casting types is enabled
# check cross_casting
ret_dtype = cross_caster(intersect)
if ret_dtype:
return ret_dtype
# check upcasting
ret_dtype = upcaster(dtype, intersect)
if ret_dtype:
return ret_dtype
# check downcasting
ret_dtype = downcaster(dtype, intersect)
if ret_dtype:
return ret_dtype
elif ivy.crosscast_dtypes:
# check cross_casting
ret_dtype = cross_caster(intersect)
if ret_dtype:
return ret_dtype
elif ivy.upcast_dtypes:
# check upcasting
ret_dtype = upcaster(dtype, intersect)
if ret_dtype:
return ret_dtype
elif ivy.downcast_dtypes:
# check downcasting
ret_dtype = downcaster(dtype, intersect)
if ret_dtype:
return ret_dtype
def cast_helper(arg, dtype, intersect, is_upcast=True):
step = 1 if is_upcast else -1
index = casting_modes_dict[arg]().index(dtype) + step
result = ""
while 0 <= index < len(casting_modes_dict[arg]()):
if casting_modes_dict[arg]()[index] not in intersect:
result = casting_modes_dict[arg]()[index]
break
index += step
return result
def upcaster(dtype, intersect):
    # upcasting is enabled; promote to the next valid dtype above that is
    # not in `intersect`
if "uint" in str(dtype):
return cast_helper("uint", dtype, intersect, is_upcast=True)
if "int" in dtype:
return cast_helper("int", dtype, intersect, is_upcast=True)
if "float" in dtype:
return cast_helper("float", dtype, intersect, is_upcast=True)
if "complex" in dtype:
return cast_helper("complex", dtype, intersect, is_upcast=True)
def downcaster(dtype, intersect):
    # downcasting is enabled; demote to the next valid dtype below that is
    # not in `intersect`
if "uint" in str(dtype):
return cast_helper("uint", dtype, intersect, is_upcast=False)
if "int" in dtype:
return cast_helper("int", dtype, intersect, is_upcast=False)
if "float" in dtype:
return cast_helper("float", dtype, intersect, is_upcast=False)
if "complex" in dtype:
return cast_helper("complex", dtype, intersect, is_upcast=False)
def cross_caster(intersect):
# check if this is an integer unsupported case
# intersect is unordered, sorting it makes a list
# and remaking it a set messes the order
# so we stick with making both of these
# sorted lists
dtype = ""
valid_float = sorted(ivy.valid_float_dtypes)
valid_int = sorted(ivy.valid_int_dtypes)
intersect = sorted(intersect)
if set(valid_int).issubset(intersect):
# make dtype equal to default float
dtype = ivy.default_float_dtype()
elif set(valid_float).issubset(intersect):
# make dtype equal to default int
dtype = ivy.default_int_dtype()
return str(dtype)
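# Worked example (illustrative): suppose every valid int dtype is unsupported
# for some function, so `intersect` covers all of ivy.valid_int_dtypes. Then
# cross_caster(intersect) returns ivy.default_float_dtype() (e.g. "float32"
# under default settings), whereas upcaster/downcaster instead walk
# casting_modes_dict within the same dtype kind to the nearest supported one.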
def try_array_function_override(func, overloaded_args, types, args, kwargs):
if not overloaded_args:
return False, None
for overloaded_arg in overloaded_args:
# Note that we're only calling __ivy_array_function__ on the *first*
# occurrence of each argument type. This is necessary for reasonable
# performance with a possibly long list of overloaded arguments, for
# which each __ivy_array_function__ implementation might reasonably need to
# check all argument types.
try:
result = overloaded_arg.__ivy_array_function__(func, types, args, kwargs)
except Exception:
raise ivy.utils.exceptions.IvyNotImplementedException
if result is not NotImplemented:
return True, result
raise TypeError(
f"no implementation found for {func} on types that implement"
f" __ivy_array_function__: {list(map(type, overloaded_args))}"
)
def _get_first_array(*args, **kwargs):
# ToDo: make this more efficient, with function ivy.nested_nth_index_where
def array_fn(x):
return (
ivy.is_array(x)
if not hasattr(x, "_ivy_array")
else ivy.is_array(x.ivy_array)
)
array_fn = array_fn if "array_fn" not in kwargs else kwargs["array_fn"]
arr = None
if args:
arr_idxs = ivy.nested_argwhere(args, array_fn, stop_after_n_found=1)
if arr_idxs:
arr = ivy.index_nest(args, arr_idxs[0])
else:
arr_idxs = ivy.nested_argwhere(kwargs, array_fn, stop_after_n_found=1)
if arr_idxs:
arr = ivy.index_nest(kwargs, arr_idxs[0])
elif kwargs:
arr_idxs = ivy.nested_argwhere(kwargs, array_fn, stop_after_n_found=1)
if arr_idxs:
arr = ivy.index_nest(kwargs, arr_idxs[0])
return arr
def _build_view(original, view, fn, args, kwargs, index=None):
if ivy.exists(original._base):
base = original._base
view._manipulation_stack = python_copy.copy(original._manipulation_stack)
else:
base = original
view._base = base
base._view_refs.append(weakref.ref(view))
view._manipulation_stack.append((fn, args[1:], kwargs, index))
# Handle attributes for torch functions without native view functionality
if ivy.exists(original._torch_base):
view._torch_base = (
original
if ivy.exists(original._torch_manipulation)
else original._torch_base
)
else:
view._torch_base = base
if fn in _torch_non_native_view_functions:
view._torch_manipulation = (original, (fn, args[1:], kwargs))
view._torch_base._torch_view_refs.append(weakref.ref(view))
return view
_torch_non_native_view_functions = ("flip", "flipud", "rot90", "fliplr")
def _check_in_nested_sequence(sequence, value=None, _type=None):
"""Check `sequence` for either a `value` or a value of type `_type`.
Helper to recursively check if a N-level nested `sequence` contains
either a `value` or contains a value of type `_type` and return a
boolean flag.
"""
if sequence is value or (isinstance(sequence, _type)):
# Base case - N = 0
return True
elif isinstance(sequence, (tuple, list)):
if any(isinstance(_val, _type) or _val is value for _val in sequence):
# N = 1
return True
else:
return any(
_check_in_nested_sequence(sub_sequence, value, _type)
for sub_sequence in sequence
if isinstance(sub_sequence, (tuple, list))
)
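# e.g. _check_in_nested_sequence(([0, slice(None)], 3), value=Ellipsis,
# _type=slice) returns True, since a slice occurs at the second nesting level.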
def _get_preferred_device(args, kwargs):
# When new arrays are created, they should be created on the same device as
# existing array inputs. If a device is specified as a kwarg, create them there.
# If not, scan for any other inputs which are already arrays and use the device
# of the first one found (unless we're in soft device mode).
    if "device" in kwargs and kwargs["device"] is not None:
        return ivy.as_native_dev(kwargs["device"])
if not ivy.soft_device_mode:
arr_arg = _get_first_array(*args, **kwargs)
return ivy.default_device(item=arr_arg, as_native=True)
return ivy.default_device(as_native=True)
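# Illustrative behavior: with soft device mode off and a GPU-resident array
# among the existing inputs, any new arrays created from Python scalars or
# lists are placed on that same GPU rather than on the global default device.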
# Array Handling #
# ---------------#
def handle_array_function(fn):
"""Wrap a function `fn` to be passed to array_function method.
Wrap a function to extract the relevant argument types to be passed
to array_function method.
"""
@functools.wraps(fn)
def _handle_array_function(*args, **kwargs):
overloaded_types = []
overloaded_args = []
for arg in args + tuple(kwargs.values()):
if ivy.exists(arg):
if not isinstance(arg, ivy.Container) and hasattr(
arg, "__ivy_array_function__"
):
if type(arg) not in overloaded_types:
overloaded_types.append(type(arg))
if (
arg.__ivy_array_function__
is not ivy.Array.__ivy_array_function__
and not isinstance(arg, (ivy.Array, ivy.NativeArray))
):
index = len(overloaded_args)
for i, old_arg in enumerate(overloaded_args):
if issubclass(type(arg), type(old_arg)):
index = i
break
overloaded_args.insert(index, arg)
elif isinstance(arg, ivy.Container):
arg = ivy.Container.cont_flatten_key_chains(arg)
indices = ivy.nested_argwhere(
arg, lambda x: hasattr(x, "__ivy_array_function__")
)
for a in indices:
if type(getattr(arg, a[0])) not in overloaded_types:
overloaded_types.append(type(getattr(arg, a[0])))
if getattr(
arg, a[0]
).__ivy_array_function__ is not ivy.Array.__ivy_array_function__ and not isinstance( # noqa: E501
getattr(arg, a[0]), (ivy.Array, ivy.NativeArray)
):
index = len(overloaded_args)
for i, old_arg in enumerate(overloaded_args):
if issubclass(
type(getattr(arg, a[0])), type(old_arg)
):
index = i
break
overloaded_args.insert(index, arg)
success, value = try_array_function_override(
ivy.__dict__[fn.__name__], overloaded_args, overloaded_types, args, kwargs
)
if success:
return value
return fn(*args, **kwargs)
_handle_array_function.handle_array_function = True
return _handle_array_function
def handle_array_like_without_promotion(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_array_like_without_promotion(*args, **kwargs):
args = list(args)
num_args = len(args)
try:
type_hints = inspect.signature(fn).parameters
except (TypeError, ValueError):
return fn(*args, **kwargs)
parameters = list(type_hints.keys())
annotations = [param.annotation for param in type_hints.values()]
device = _get_preferred_device(args, kwargs)
for i, (annotation, parameter, arg) in enumerate(
zip(annotations, parameters, args)
):
annotation_str = str(annotation)
if (
("rray" in annotation_str or "Tensor" in annotation_str)
and parameter != "out"
and all(
sq not in annotation_str
for sq in ["Sequence", "List", "Tuple", "float", "int", "bool"]
)
):
if i < num_args:
# Fix for ellipsis, slices for numpy's __getitem__
# No need to try and convert them into arrays
# since asarray throws unpredictable bugs
if _check_in_nested_sequence(arg, value=Ellipsis, _type=slice):
continue
if not ivy.is_array(arg):
args[i] = ivy.array(arg, device=device)
                elif parameter in kwargs:
kwarg = kwargs[parameter]
if not ivy.is_array(kwarg):
kwargs[parameter] = ivy.array(kwarg, device=device)
return fn(*args, **kwargs)
_handle_array_like_without_promotion.handle_array_like_without_promotion = True
return _handle_array_like_without_promotion
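# Illustrative effect (for a function whose parameters are annotated as
# arrays): calling the wrapped function with the plain list [1, 2, 3] behaves
# like calling it with ivy.array([1, 2, 3]), so backends never see raw
# Python sequences in array positions.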
def inputs_to_native_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_native_arrays(*args, **kwargs):
"""Convert all `ivy.Array` instances in both the positional and keyword
        arguments into `ivy.NativeArray` instances, and then call the function
with the updated arguments.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with native arrays passed in the arguments.
"""
if not ivy.array_mode:
return fn(*args, **kwargs)
# check if kwargs contains an out argument, and if so, remove it
has_out = False
out = None
if "out" in kwargs:
out = kwargs["out"]
del kwargs["out"]
has_out = True
# convert all arrays in the inputs to ivy.NativeArray instances
new_args, new_kwargs = ivy.args_to_native(*args, **kwargs)
# add the original out argument back to the keyword arguments
if has_out:
new_kwargs["out"] = out
return fn(*new_args, **new_kwargs)
_inputs_to_native_arrays.inputs_to_native_arrays = True
return _inputs_to_native_arrays
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_ivy_arrays(*args, **kwargs):
"""Convert all `ivy.NativeArray` instances in both the positional and
        keyword arguments into `ivy.Array` instances, and then call the
function with the updated arguments.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with ivy arrays passed in the arguments.
"""
if not ivy.array_mode:
warnings.warn(
"In the case of Compositional function, operators might cause"
" inconsistent behavior when array_mode is set to False"
)
return fn(*args, **kwargs)
has_out = False
if "out" in kwargs:
out = kwargs["out"]
has_out = True
# convert all arrays in the inputs to ivy.Array instances
ivy_args, ivy_kwargs = ivy.args_to_ivy(
*args, **kwargs, include_derived={"tuple": True}
)
if has_out:
ivy_kwargs["out"] = out
return fn(*ivy_args, **ivy_kwargs)
_inputs_to_ivy_arrays.inputs_to_ivy_arrays = True
return _inputs_to_ivy_arrays
def inputs_to_native_shapes(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_native_shapes(*args, **kwargs):
args, kwargs = ivy.nested_map(
lambda x: (x.shape if isinstance(x, ivy.Shape) and ivy.array_mode else x),
[args, kwargs],
)
return fn(*args, **kwargs)
_inputs_to_native_shapes.inputs_to_native_shapes = True
return _inputs_to_native_shapes
def outputs_to_ivy_shapes(fn: Callable) -> Callable:
@functools.wraps(fn)
def _outputs_to_ivy_shapes(*args, **kwargs):
args, kwargs = ivy.nested_map(
lambda x: (x.shape if isinstance(x, ivy.Shape) and ivy.array_mode else x),
[args, kwargs],
)
return fn(*args, **kwargs)
_outputs_to_ivy_shapes.outputs_to_ivy_shapes = True
return _outputs_to_ivy_shapes
def to_native_shapes_and_back(fn: Callable) -> Callable:
"""Make `fn` receive `ivy.NativeShape` and return `ivy.Shape`.
Wrap `fn` so that input shapes are all converted to
`ivy.NativeShape` instances and return shapes are all converted to
`ivy.Shape` instances.
"""
return outputs_to_ivy_shapes(inputs_to_native_shapes(fn))
def outputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _outputs_to_ivy_arrays(*args, **kwargs):
"""Call the function, and then converts all `ivy.NativeArray` instances
in the function return into `ivy.Array` instances.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with native arrays as ivy arrays.
"""
# call unmodified function
ret = fn(*args, **kwargs)
# convert all arrays in the return to `ivy.Array` instances
return (
ivy.to_ivy(ret, nested=True, include_derived={"tuple": True})
if ivy.array_mode
else ret
)
_outputs_to_ivy_arrays.outputs_to_ivy_arrays = True
return _outputs_to_ivy_arrays
def output_to_native_arrays(fn: Callable) -> Callable:
"""Call the function, and then converts all `ivy.Array` instances in the
function return into `ivy.NativeArray` instances.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with ivy arrays as native arrays.
"""
@functools.wraps(fn)
def _output_to_native_arrays(*args, **kwargs):
ret = fn(*args, **kwargs)
return ivy.to_native(ret, nested=True, include_derived={"tuple": True})
_output_to_native_arrays.outputs_to_native_arrays = True
return _output_to_native_arrays
def to_ivy_arrays_and_back(fn: Callable) -> Callable:
"""Make `fn` receive `ivy.Array` and return `ivy.NativeArray`.
Wrap `fn` so that input arrays are all converted to `ivy.Array`
instances and return arrays are all converted to `ivy.NativeArray`
instances.
"""
return output_to_native_arrays(inputs_to_ivy_arrays(fn))
def to_native_arrays_and_back(fn: Callable) -> Callable:
"""Make `fn` receive `ivy.NativeArray` and return `ivy.Array`.
Wrap `fn` so that input arrays are all converted to
`ivy.NativeArray` instances and return arrays are all converted to
`ivy.Array` instances.
"""
return outputs_to_ivy_arrays(inputs_to_native_arrays(fn))
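# Sketch of intended use (`my_backend_fn` is a hypothetical name): a backend
# implementation written against native arrays can be exposed with ivy arrays
# on both ends via
#     my_backend_fn = to_native_arrays_and_back(my_backend_fn)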
def frontend_outputs_to_ivy_arrays(fn: Callable) -> Callable:
"""Wrap `fn` and convert all frontend arrays in its return to ivy arrays.
Used in cases when a frontend function receives a callable (frontend
function) argument. To be able to use that callable in a composition
of ivy functions, its outputs need to be converted to ivy arrays.
"""
@functools.wraps(fn)
def _outputs_to_ivy_arrays(*args, **kwargs):
ret = fn(*args, **kwargs)
return ivy.nested_map(
lambda x: x.ivy_array if hasattr(x, "ivy_array") else x,
ret,
shallow=False,
)
return _outputs_to_ivy_arrays
def handle_view(fn: Callable) -> Callable:
"""Wrap `fn` and performs view handling if copy is False.
Used for functional backends (Jax and TensorFlow). Checks if the
first arg is a view or original array by checking if the ._base
attribute is populated. If it's original it adds the returned array
to its view references, then the returned array adds the operation
to its manipulation stack and stores the original as its base. If
the first arg is a view, then the returned array copies its base and
manipulation stack, appends the new operation to the manipulation
stack and appends its reference to the base array's view_refs
attribute.
"""
@functools.wraps(fn)
def _handle_view(*args, **kwargs):
ret = fn(*args, **kwargs)
if ("copy" in kwargs and kwargs["copy"]) or not ivy.is_ivy_array(args[0]):
return ret
original = args[0]
if isinstance(ret, (list, tuple)):
for i, view in enumerate(ret):
ret[i] = _build_view(original, view, fn.__name__, args, kwargs, i)
else:
ret = _build_view(original, ret, fn.__name__, args, kwargs, None)
return ret
_handle_view.handle_view = True
return _handle_view
def handle_view_indexing(fn: Callable) -> Callable:
"""Wrap `fn` and performs view handling specifically for indexing.
As with NumPy it returns a copy if advanced indexing is performed.
Used for functional backends (Jax and TensorFlow). Checks if the
first arg is a view or original array by checking if the ._base
attribute is populated. If it's original it adds the returned array
to its view references, then the returned array adds the operation
to its manipulation stack and stores the original as its base. If
the first arg is a view, then the returned array copies its base and
manipulation stack, appends the new operation to the manipulation
stack and appends its reference to the base array's view_refs
attribute.
"""
@functools.wraps(fn)
def _handle_view_indexing(*args, **kwargs):
ret = fn(*args, **kwargs)
if ("copy" in kwargs and kwargs["copy"]) or not ivy.is_ivy_array(args[0]):
return ret
query = kwargs["query"] if "query" in kwargs else args[1]
query = query if isinstance(query, tuple) else (query,)
if [i for i in query if not isinstance(i, (slice, int))]:
return ret
original = args[0]
# ToDo: Remove hard coding of only function with this wrapper
# Need general way to convert special method to function found in ivy.__dict__
ret = _build_view(original, ret, "get_item", args, kwargs)
return ret
_handle_view_indexing.handle_view_indexing = True
return _handle_view_indexing
def _convert_numpy_arrays_to_backend_specific(*args):
    # `args` arrives as a tuple here, so an isinstance check against
    # np.ndarray would always be False; scan the nest for numpy arrays instead.
    np_arr_idxs = ivy.nested_argwhere(args, lambda x: isinstance(x, np.ndarray))
    if np_arr_idxs:
        args = ivy.copy_nest(args, to_mutable=True)
        np_arr_val = ivy.multi_index_nest(args, np_arr_idxs)
        backend_arr_vals = [ivy.array(x).to_native() for x in np_arr_val]
        ivy.set_nest_at_indices(args, np_arr_idxs, backend_arr_vals)
    return args
def handle_numpy_arrays_in_specific_backend(fn: Callable) -> Callable:
"""Wrap `fn` and converts all `numpy.ndarray` inputs to `torch.Tensor`
instances.
Used for functional backends (PyTorch). Converts all `numpy.ndarray`
inputs to `torch.Tensor` instances.
"""
@functools.wraps(fn)
def _handle_numpy_array_in_torch(*args, **kwargs):
args = _convert_numpy_arrays_to_backend_specific(*args)
ret = fn(*args, **kwargs)
return ret
_handle_numpy_array_in_torch.handle_numpy_arrays_in_specific_backend = True
return _handle_numpy_array_in_torch
# Data Type Handling #
# -------------------#
def infer_dtype(fn: Callable) -> Callable:
@functools.wraps(fn)
def _infer_dtype(*args, dtype=None, **kwargs):
"""Determine the correct `dtype`, and then calls the function with the
`dtype` passed explicitly.
Parameters
----------
args
The arguments to be passed to the function.
dtype
The data type for the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with `dtype` passed explicitly.
"""
# find the first array argument, if required
arr = None if ivy.exists(dtype) else _get_first_array(*args, **kwargs)
# infer the correct data type
dtype = ivy.default_dtype(dtype=dtype, item=arr, as_native=True)
ivy.utils.assertions._check_jax_x64_flag(dtype)
# call the function with dtype provided explicitly
return fn(*args, dtype=dtype, **kwargs)
_infer_dtype.infer_dtype = True
return _infer_dtype
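# Illustrative effect: for a creation function decorated with @infer_dtype,
# calling it without an explicit dtype resolves one via ivy.default_dtype,
# based on the first array argument when present, else the global default.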
# Device Handling #
# ----------------#
def handle_device(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_device(*args, **kwargs):
"""Move all array inputs of the function to `ivy.default_device()`.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function.
"""
dev = None
if "device" in kwargs and kwargs["device"] is not None:
dev = ivy.as_native_dev(kwargs["device"])
if ivy.soft_device_mode:
with ivy.DefaultDevice(ivy.default_device(dev)):
return ivy.handle_soft_device_variable(*args, fn=fn, **kwargs)
inputs = args + tuple(kwargs.values())
devices = tuple(ivy.dev(x) for x in inputs if ivy.is_array(x))
unique_devices = set(devices)
# check if arrays are on the same device
if len(unique_devices) <= 1:
# len(unique_devices) == 0 when there are no arrays
dst_dev = (
dev
if dev is not None
else None if len(unique_devices) == 0 else next(iter(unique_devices))
)
with ivy.DefaultDevice(ivy.default_device(dst_dev)):
return ivy.handle_soft_device_variable(*args, fn=fn, **kwargs)
# raise when arrays are on different devices
elif len(unique_devices) > 1:
raise ivy.utils.exceptions.IvyException(
"Expected all input arrays to be on the same device, "
f"but found at least two devices - {devices}, "
"set `ivy.set_soft_device_mode(True)` to handle this problem."
)
return fn(*args, **kwargs)
_handle_device.handle_device = True
return _handle_device
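# Illustrative failure mode: mixing inputs that live on two different devices
# raises an IvyException, unless ivy.set_soft_device_mode(True) is enabled,
# in which case all inputs are first moved to the default device.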
# Inplace Update Handling #
# ------------------------#
def handle_out_argument(fn: Callable) -> Callable:
handle_out_in_backend = hasattr(fn, "support_native_out")
@functools.wraps(fn)
def _handle_out_argument(*args, out=None, **kwargs):
"""Call `fn` with the `out` argument handled correctly for performing
an inplace update.
Parameters
----------
args
The arguments to be passed to the function.
out
The array to write the result to.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with `out` handled correctly for
inplace updates.
"""
nonlocal handle_out_in_backend
if out is None:
return fn(*args, out=out, **kwargs)
if ivy.gradients._is_variable(out):
handle_out_in_backend = False
if handle_out_in_backend:
# extract underlying native array for out
native_out = ivy.to_native(out)
# compute return, with backend inplace update handled by
# the backend function
ret = fn(*args, out=native_out, **kwargs)
if isinstance(ret, (tuple, list)):
for i in range(len(ret)):
ivy.inplace_update(out[i], ret[i])
if ivy.backend == "torch":
_update_torch_views(out[i])
else:
ivy.inplace_update(out, ret)
if ivy.backend == "torch":
_update_torch_views(out)
return out
# compute return, and then handle the inplace update explicitly
ret = fn(*args, **kwargs)
if not ivy.is_array(ret) and not ivy.is_ivy_container(ret):
return ivy.nested_multi_map(
lambda x, _: ivy.inplace_update(
x[0], ivy.astype(x[1], ivy.dtype(x[0]))
),
[out, ret],
)
        # the returned output is cast to the dtype of the out array, to match
        # the behaviour of numpy and torch
        return ivy.inplace_update(out, ivy.astype(ret, ivy.dtype(out)))
_handle_out_argument.handle_out_argument = True
return _handle_out_argument
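# Illustrative usage of the `out` handling (a hedged sketch):
# >>> x = ivy.array([1.0, 2.0])
# >>> out = ivy.zeros(2)
# >>> ret = ivy.add(x, x, out=out)  # result written in place into `out`
# >>> ret is out
# True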
def _update_torch_views(x, visited_view=None):
if x._torch_view_refs != []:
_update_torch_references(x, visited_view)
if ivy.exists(x._torch_manipulation):
parent_tensor, fn_args_kwargs = x._torch_manipulation
fn, args, kwargs = fn_args_kwargs
kwargs["copy"] = True
if fn == "rot90":
kwargs = kwargs.copy()
kwargs["k"] = -kwargs["k"]
parent_tensor.data[()] = ivy.__dict__[fn](x, *args, **kwargs).data
if ivy.exists(x._torch_base):
_update_torch_views(x._torch_base, visited_view=x)
def _update_torch_references(x, visited_view=None):
for ref in x._torch_view_refs:
view = ref()
if ivy.exists(view) and view is not visited_view:
parent_tensor, fn_args_kwargs = view._torch_manipulation
fn, args, kwargs = fn_args_kwargs
kwargs["copy"] = True
view.data[()] = ivy.__dict__[fn](parent_tensor, *args, **kwargs).data
if view._torch_view_refs != []:
_update_torch_references(view)
# Nestable Handling #
# ------------------#
def handle_nestable(fn: Callable) -> Callable:
fn_name = fn.__name__
@functools.wraps(fn)
def _handle_nestable(*args, **kwargs):
"""Call `fn` with the *nestable* property of the function correctly
handled. This means mapping the function to the container leaves if any
containers are passed in the input.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with the nestable property handled correctly.
"""
# if any of the arguments or keyword arguments passed to the function contains
# a container, get the container's version of the function and call it using
# the passed arguments.
if hasattr(ivy.Container, f"_static_{fn_name}"):
cont_fn = getattr(ivy.Container, f"_static_{fn_name}")
else:
def cont_fn(*args, **kwargs):
return ivy.Container.cont_multi_map_in_function(fn, *args, **kwargs)
if ivy.nestable_mode and (
ivy.nested_any(args, ivy.is_ivy_container, check_nests=True)
or ivy.nested_any(kwargs, ivy.is_ivy_container, check_nests=True)
):
return cont_fn(*args, **kwargs)
        # if the passed arguments do not contain a container, call the function
        # with the passed arguments, returning an ivy or a native array.
return fn(*args, **kwargs)
_handle_nestable.handle_nestable = True
return _handle_nestable
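# Illustrative behaviour of `handle_nestable` (a hedged sketch): when a
# container is passed in, the function is mapped over its leaves.
# >>> c = ivy.Container(a=ivy.array([1.0]), b=ivy.array([4.0]))
# >>> ivy.sqrt(c)
# {
#     a: ivy.array([1.]),
#     b: ivy.array([2.])
# }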
def handle_ragged(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_ragged(*args, **kwargs):
"""Call `fn` with the *ragged* property of the function correctly
handled. This means mapping the function to the RaggedArray arrays if
any RaggedArrays are passed in the input.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with the ragged property handled correctly.
"""
def nested_fn(*args, **kwargs):
return ivy.NestedArray.ragged_multi_map_in_function(fn, *args, **kwargs)
if ivy.nested_any(
args, ivy.is_ivy_nested_array, check_nests=True
) or ivy.nested_any(kwargs, ivy.is_ivy_nested_array, check_nests=True):
return nested_fn(*args, **kwargs)
        # if the passed arguments do not contain a nested array, call the
        # function with the passed arguments, returning an ivy or a native array.
return fn(*args, **kwargs)
_handle_ragged.handle_ragged = True
return _handle_ragged
# Partial Mixed Function Handling #
def handle_partial_mixed_function(fn) -> Callable:
@functools.wraps(fn)
def _handle_partial_mixed_function(*args, **kwargs):
handle_mixed_in_backend = False
if not hasattr(fn, "partial_mixed_handler"):
handle_mixed_in_backend = True
else:
compos = getattr(fn, "compos")
condition = getattr(fn, "partial_mixed_handler")
if handle_mixed_in_backend or condition(*args, **kwargs):
return fn(*args, **kwargs)
return compos(*args, **kwargs)
_handle_partial_mixed_function.handle_partial_mixed_function = True
return _handle_partial_mixed_function
# Temporary asarray wrapper (Please request my review before removing)
def temp_asarray_wrapper(fn: Callable) -> Callable:
@functools.wraps(fn)
def _temp_asarray_wrapper(*args, **kwargs):
"""Convert `Tensor` into `ivy.Array` instances.
Convert all `Tensor` instances in both the positional and keyword arguments
into `ivy.Array` instances, and then call the function with the updated
arguments.
"""
def _to_ivy_array(x):
# if x is a frontend torch Tensor (or any frontend "Tensor" actually) return the wrapped ivy array # noqa: E501
if hasattr(x, "ivy_array"):
return x.ivy_array
# else just return x
return x
# convert all input arrays to ivy.Array instances
new_args = ivy.nested_map(
_to_ivy_array, args, include_derived={"tuple": True}, shallow=False
)
new_kwargs = ivy.nested_map(
_to_ivy_array, kwargs, include_derived={"tuple": True}, shallow=False
)
return fn(*new_args, **new_kwargs)
_temp_asarray_wrapper.temp_asarray_wrapper = True
return _temp_asarray_wrapper
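# Illustrative effect of `temp_asarray_wrapper` (a hedged sketch;
# `takes_ivy_arrays` is a hypothetical function):
# >>> @temp_asarray_wrapper
# ... def takes_ivy_arrays(x):
# ...     assert isinstance(x, ivy.Array)
# ...     return x
# Passing a frontend tensor `t` calls the function with `t.ivy_array` instead.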
# Download compiled cython wrapper wrapper
def download_cython_wrapper_wrapper(fn: Callable) -> Callable:
@functools.wraps(fn)
def _download_cython_wrapper_wrapper(*args, **kwargs):
"""Wrap the function to download compiled cython wrapper for the
function and re- wraps it with the downloaded wrapper.
Download the compiled cython wrapper by calling
ivy.wrappers.get_wrapper(func_name: str) and then wrap the
function with the downloaded wrapper.
"""
ivy.wrappers.download_cython_wrapper(fn.__name__)
ivy.wrappers.load_one_wrapper(fn.__name__)
ivy.functional.__dict__[fn.__name__] = getattr(
ivy.wrappers, fn.__name__ + "_wrapper"
)(fn)
return ivy.functional.__dict__[fn.__name__](*args, **kwargs)
return _download_cython_wrapper_wrapper
# Functions #
def _wrap_function(
key: str, to_wrap: Callable, original: Callable, compositional: bool = False
) -> Callable:
"""Apply wrapping to backend implementation `to_wrap` if the original
implementation `original` is also wrapped, and if `to_wrap` is not already
wrapped. Attributes `handle_nestable` etc are set during wrapping, hence
indicate to us whether a certain function has been wrapped or not. Also
handles wrapping of the `linalg` namespace.
Parameters
----------
to_wrap
the new implementation to potentially wrap
original
the original implementation of `to_wrap` which tells us which wrappers we need.
compositional
indicates whether the function being wrapped is compositional
(Default Value = ``False``).
Returns
-------
ret
`to_wrap` appropriately wrapped if `to_wrap` is a function, otherwise just the
input is returned.
"""
if key == "linalg":
for linalg_k, linalg_v in to_wrap.__dict__.items():
if (
isinstance(linalg_v, FunctionType)
and linalg_k.lower() != "namedtuple"
and linalg_k != "with_unsupported_dtypes"
and not linalg_k.startswith("_")
):
to_wrap.__dict__[linalg_k] = _wrap_function(
linalg_k,
linalg_v,
ivy.__dict__[linalg_k],
compositional=compositional,
)
return to_wrap
if isinstance(to_wrap, FunctionType):
if ivy.cython_wrappers_mode and ivy.wrappers.wrapper_exists(to_wrap.__name__):
if to_wrap.__name__ + "_wrapper" in ivy.wrappers.__all__:
to_wrap = getattr(ivy.wrappers, to_wrap.__name__ + "_wrapper")(to_wrap)
return to_wrap
else:
return download_cython_wrapper_wrapper(to_wrap)
# set attributes
for attr in original.__dict__.keys():
# private attribute or decorator
if (
attr.startswith("_")
or hasattr(ivy, attr)
or attr == "mixed_backend_wrappers"
):
continue
setattr(to_wrap, attr, getattr(original, attr))
# Copy docstring
docstring_attr = ["__annotations__", "__doc__"]
for attr in docstring_attr:
setattr(to_wrap, attr, getattr(original, attr))
mixed_fn = hasattr(original, "mixed_backend_wrappers") and original != to_wrap
partial_mixed = (
mixed_fn
and hasattr(original, "handle_partial_mixed_function")
and hasattr(to_wrap, "partial_mixed_handler")
)
add_wrappers, skip_wrappers = [], []
if mixed_fn:
backend_wrappers = getattr(original, "mixed_backend_wrappers")
add_wrappers = backend_wrappers.get("to_add")
skip_wrappers = backend_wrappers.get("to_skip")
for attr in FN_DECORATORS:
if hasattr(original, attr) and not hasattr(to_wrap, attr):
if partial_mixed and attr == "handle_partial_mixed_function":
to_wrap.compos = original
to_wrap = handle_partial_mixed_function(to_wrap)
if attr not in skip_wrappers:
to_wrap = getattr(ivy, attr)(to_wrap)
if attr in add_wrappers:
to_wrap = getattr(ivy, attr)(to_wrap)
    # we should remove all the decorators
    # after handle_partial_mixed_function in FN_DECORATORS
    # from the compos function because these will
    # be run from the primary implementation.
if partial_mixed:
array_spec = to_wrap.compos.__dict__["array_spec"]
for attr in FN_DECORATORS[
-1 : FN_DECORATORS.index("handle_partial_mixed_function") : -1
]:
if hasattr(to_wrap.compos, attr):
to_wrap.compos = to_wrap.compos.__wrapped__
to_wrap.compos.__dict__["array_spec"] = array_spec
return to_wrap
def casting_modes_ops(fn, ret_dtype_target=None):
@functools.wraps(fn)
def method(*args, **kwargs):
# Get the function signature
signature = inspect.signature(fn)
# Extract argument names
arg_names = [param.name for param in signature.parameters.values()]
# we first check if it has unsupported/supported dtypes uniquely added to it
intersect = set(ivy.function_unsupported_dtypes(fn)).difference(
set(ivy.invalid_dtypes)
)
if not intersect:
# doesn't have unsupported dtypes specified
# so check if it's one of the device_and_dtype one
intersect = set(
ivy.function_unsupported_devices_and_dtypes(fn).get(
ivy.default_device().split(":")[0], {None}
)
).difference(set(ivy.invalid_dtypes))
if not intersect:
# no unsupported dtype specified
return fn(*args, **kwargs)
# specifies which dtype to cast the output to
to_cast = None
if "dtype" in kwargs and kwargs["dtype"] is not None:
to_cast = kwargs["dtype"]
dtype = caster(kwargs["dtype"], intersect)
if dtype:
kwargs["dtype"] = ivy.as_native_dtype(dtype)
def mini_helper(x):
if not hasattr(x, "dtype"):
return x
dtype = caster(x, intersect)
if dtype:
x = ivy.to_native(ivy.astype(x, ivy.as_native_dtype(dtype)))
return x
args = ivy.nested_map(mini_helper, args, include_derived=True)
kwargs = ivy.nested_map(mini_helper, kwargs)
if not to_cast and ret_dtype_target:
for arg in ret_dtype_target:
if arg:
to_cast, arg_mod = ivy.promote_types_of_inputs(
to_cast,
(
args[arg_names.index(arg)]
if arg not in kwargs
else kwargs[arg]
),
)
if arg not in kwargs:
args[arg_names.index(arg)] = (
arg_mod
if not ivy.is_array(args[arg_names.index(arg)])
else args[arg_names.index(arg)]
)
else:
kwargs[arg] = (
arg_mod
if not ivy.is_array(args[arg_names.index(arg)])
else kwargs[arg]
)
return (
ivy.astype(fn(*args, **kwargs), ivy.to_native(to_cast))
if to_cast
else fn(*args, **kwargs)
)
return method
# Gets dtype from a version dictionary
def _dtype_from_version(dic, version):
# if version is a string, it's a frontend function
if isinstance(version, str):
version = ivy.functional.frontends.__dict__["versions"][version]
# if version is a dict, extract the version
if isinstance(version, dict):
version = version["version"]
# If version dict is empty, then there is an error
if not dic:
raise ValueError("No version found in the dictionary")
# If key is already in the dictionary, return the value
if version in dic:
return dic[version]
version_tuple = tuple(map(int, version.split(".")))
# If key is not in the dictionary, check if it's in any range
# three formats are supported:
# 1. x.y.z and above
# 2. x.y.z and below
# 3. x.y.z to x.y.z
for key in dic.keys():
kl = key.split(" ")
k1 = tuple(map(int, kl[0].split(".")))
if "above" in key and k1 <= version_tuple:
return dic[key]
if "below" in key and k1 >= version_tuple:
return dic[key]
if "to" in key and k1 <= version_tuple <= tuple(map(int, kl[2].split("."))):
return dic[key]
# if no version is found, return the last version
return dic[list(dic.keys())[-1]]
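# Illustrative resolution performed by `_dtype_from_version` (a hedged sketch;
# the version strings below are made up):
# >>> dic = {"1.11.0 and below": ("float16",), "1.12.0 to 2.0.0": ("bfloat16",)}
# >>> _dtype_from_version(dic, {"version": "1.13.0"})
# ('bfloat16',)
# "1.13.0" is not an exact key, so the range keys are parsed and the
# "1.12.0 to 2.0.0" entry matches.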
def _versioned_attribute_factory(attribute_function, base):
class VersionedAttributes(base):
"""Class which add versioned attributes to a class, inheriting from
`base`.
Create a class which inherits `base` this way if isinstance is
called on an instance of the class, it will return True if
testing for the baseclass, such as isinstance(instance, tuple)
if `base` is tuple.
"""
def __init__(self):
self.attribute_function = attribute_function
def __get__(self, instance=None, owner=None):
# version dtypes recalculated every time it's accessed
return self.attribute_function()
def __iter__(self):
# iter allows for iteration over current version that's selected
return iter(self.__get__())
def __repr__(self):
return repr(self.__get__())
def __bool__(self):
return bool(self.__get__())
return VersionedAttributes()
def _dtype_device_wrapper_creator(attrib, t):
"""Create a wrapper for a dtype or device attribute.
The wrapper returns the correct dtype or device for the current version of the
backend.
Parameters
----------
    attrib
        The attribute name to be wrapped, for example "unsupported_dtypes".
    t
        The type of the attribute, for example `tuple`.
Returns
-------
A wrapper function for the attribute.
"""
def _wrapper_outer(version_dict, version, exclusive=True, ret_dtype_target=None):
def _wrapped(func):
val = _versioned_attribute_factory(
lambda: _dtype_from_version(version_dict, version), t
)
if hasattr(func, "override"):
# we do nothing
return func
if not exclusive:
# exclusive attribute comes into existence
# only when exclusive is passed as true
setattr(func, "exclusive", True)
# set the attribute on the function and return the function as is
has_attrib = [
attribute for attribute in attribute_dict if hasattr(func, attribute)
] or False
if has_attrib:
for attribs in has_attrib:
if not (
attrib == attribs or (attrib, attribs) in attribute_conflict
):
# cases when we encounter two different decorators
# applied to the function, but they are not same
# and aren't in conflicting dict either
setattr(func, attrib, val)
setattr(func, "dictionary_info", (version_dict, version))
elif hasattr(func, "exclusive"):
if attrib == attribs:
# we see a higher decorator with exclusivity applied
# we use this decorator's dict information
# and previous decorator's dict information
# to update this
old_version_dict = getattr(func, "dictionary_info")[0]
old_version_dict.update(version_dict)
val = _versioned_attribute_factory(
lambda: _dtype_from_version(
version_dict, old_version_dict
),
t,
)
setattr(func, attrib, val)
else:
# for conflicting ones we do nothing
pass
else:
if not val and attrib.startswith("supported"):
setattr(func, f"un{attrib}", val)
else:
setattr(func, attrib, val)
setattr(func, "dictionary_info", (version_dict, version))
if "frontends" in func.__module__:
# it's a frontend func, no casting modes for this
return func
return casting_modes_ops(func, ret_dtype_target=ret_dtype_target)
return _wrapped
return _wrapper_outer
# nans Handling #
# --------------#
def _leaf_has_nans(x):
if isinstance(x, ivy.Container):
return x.has_nans()
elif ivy.is_array(x):
return ivy.isnan(x).any()
elif np.isnan(x):
return True
return False
def _nest_has_nans(x):
return ivy.nested_any(x, _leaf_has_nans)
def handle_nans(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_nans(*args, **kwargs):
"""Check for the existence of nans in all arrays in the `args` and
`kwargs`.
The presence of nans is then handled depending on the enabled `nan_policy`.
Following policies apply:
raise_exception: raises an exception in case nans are present
warns: warns a user in case nans are present
nothing: does nothing
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with handling of inputs based
on the selected `nan_policy`.
"""
nan_policy = ivy.nan_policy
        # skip the check if the current nan policy is `nothing`
if nan_policy == "nothing":
return fn(*args, **kwargs)
# check all args and kwargs for presence of nans
result = _nest_has_nans(args) or _nest_has_nans(kwargs)
if result:
# handle nans based on the selected policy
if nan_policy == "raise_exception":
raise ivy.utils.exceptions.IvyException(
"Nans are not allowed in `raise_exception` policy."
)
elif nan_policy == "warns":
logging.warning("Nans are present in the input.")
return fn(*args, **kwargs)
_handle_nans.handle_nans = True
return _handle_nans
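# Illustrative behaviour of `handle_nans` (a hedged sketch; assumes the
# `ivy.set_nan_policy` setter is available):
# >>> ivy.set_nan_policy("raise_exception")
# >>> ivy.sum(ivy.array([1.0, float("nan")]))
# IvyException: Nans are not allowed in `raise_exception` policy.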
# Complex number handling #
# ----------------------- #
def handle_complex_input(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_complex_input(
inp,
*args,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
**kwargs,
):
"""Check whether the first positional argument is an array of complex
type, and if so handle it according to the provided `complex_mode`.
The options are:
`"jax"` (default): emulate the behaviour of the JAX framework. If the function
has a `jax_like` attribute then this will be used to decide on the
behaviour (see below) and if not, then the entire array will be passed to
the function.
`"split"`: execute the function separately on the real and imaginary parts of
the input.
`"magnitude"`: execute the function on the magnitude of the input, and keep the
angle constant.
The `jax_like` attribute (which should be added to the function itself, and not
passed as a parameter) has the following options:
`"entire"` (default): pass the entire input to the function. This is best used
for purely mathematical operators which are already well defined on complex
inputs, as many backends will throw exceptions otherwise.
`"split"`: as the `"split"` option for `complex_mode`
`"magnitude"`: as the `"magnitude"` option for `complex_mode`
A callable function: the function will be called instead of the originally
decorated function. It will be passed `inp` and `*args` as positional
arguments, and the original `**kwargs` plus `fn_original` as keyword
arguments. The latter is the original function, in case the `jax_like`
function wishes to call it.
Parameters
----------
inp
The first positional argument to the function, which is expected to be an
:class:`ivy.Array`.
args
The remaining positional arguments to be passed to the function.
complex_mode
Optional argument which specifies the method that will be used to handle
the input, if it is complex.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with handling of inputs based
on the selected `complex_mode`.
Examples
--------
Using the default `jax_like` behaviour
>>> @handle_complex_input
>>> def my_func(inp):
>>> return ivy.ones_like(inp)
>>> x = ivy.array([1+1j, 3+4j, 5+12j])
>>> my_func(x) # equivalent to setting complex_mode="jax"
ivy.array([1.+0.j, 1.+0.j, 1.+0.j])
>>> my_func(x, complex_mode="split")
ivy.array([1.+1.j, 1.+1.j, 1.+1.j])
>>> my_func(x, complex_mode="magnitude")
ivy.array([0.70710681+0.70710675j, 0.60000001+0.79999999j,
0.38461535+0.92307694j])
Using non-default `jax_like` behaviour
>>> @handle_complex_input
>>> def my_func(inp):
>>> return ivy.ones_like(inp)
>>> my_func.jax_like = "split"
>>> my_func(x, complex_mode="jax")
ivy.array([1.+1.j, 1.+1.j, 1.+1.j])
Using callable `jax_like` behaviour
>>> def _my_func_jax_like(inp, fn_original=None):
>>> return fn_original(inp) * 3j
>>> @handle_complex_input
>>> def my_func(inp):
>>> return ivy.ones_like(inp)
>>> my_func.jax_like = _my_func_jax_like
>>> my_func(x, complex_mode="jax")
ivy.array([0.+3.j, 0.+3.j, 0.+3.j])
"""
if not ivy.is_complex_dtype(inp):
return fn(inp, *args, **kwargs)
jax_like = fn.jax_like if hasattr(fn, "jax_like") else "entire"
if complex_mode == "split" or (complex_mode == "jax" and jax_like == "split"):
real_inp = ivy.real(inp).data
imag_inp = ivy.imag(inp).data
if "out" in kwargs and kwargs["out"] is not None:
out = kwargs.pop("out")
real_ret = fn(real_inp, *args, out=ivy.real(out), **kwargs)
imag_ret = fn(imag_inp, *args, out=ivy.imag(out), **kwargs)
else:
real_ret = fn(real_inp, *args, **kwargs)
imag_ret = fn(imag_inp, *args, **kwargs)
return ivy.add(
real_ret,
ivy.multiply(ivy.array(1j, dtype=inp.dtype), imag_ret),
)
elif complex_mode == "magnitude" or (
complex_mode == "jax" and jax_like == "magnitude"
):
mag_inp = ivy.abs(inp).data
angle_inp = ivy.angle(inp).data
return ivy.multiply(
fn(mag_inp, *args, **kwargs), ivy.exp(ivy.multiply(1j, angle_inp))
)
elif complex_mode == "jax" and jax_like == "entire":
return fn(inp, *args, **kwargs)
elif complex_mode == "jax":
return jax_like(inp, *args, **kwargs, fn_original=fn)
else:
raise IvyValueError(f"complex_mode '{complex_mode}' is not recognised.")
_handle_complex_input.handle_complex_input = True
return _handle_complex_input
def handle_backend_invalid(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_backend_invalid(*args, **kwargs):
"""Check if any of the arguments (or nested arguments) passed to the
function are instances of ivy.Array or ivy.NativeArray. If so, it
returns the function. If not, it raises an InvalidBackendException.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function if the current
backend matches the argument backend.
If not, it raises an InvalidBackendException
"""
array_indices = ivy.nested_argwhere(
[args, kwargs], lambda x: isinstance(x, ivy.Array)
)
array_vals = ivy.multi_index_nest([args, kwargs], array_indices)
def func(x):
target_backend = ivy.utils.backend.handler._determine_backend_from_args(x)
if (
target_backend is not None
and ivy.backend != ""
and ivy.current_backend_str() != target_backend.backend
):
raise ivy.utils.exceptions.IvyInvalidBackendException(
"Operation not allowed. Array was instantiated with backend"
f" {target_backend.backend}. But current backend is"
f" {ivy.backend}. Please set dynamic=True"
" for the array if you want to convert it to the target"
" backend"
)
return x
ivy.nested_map(func, array_vals, include_derived=True)
return fn(*args, **kwargs)
_handle_backend_invalid.handle_backend_invalid = True
return _handle_backend_invalid
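# Illustrative failure mode caught by `handle_backend_invalid` (a hedged
# sketch): an array instantiated under one backend cannot be consumed after
# switching to another.
# >>> ivy.set_backend("torch")
# >>> x = ivy.array([1.0])
# >>> ivy.set_backend("jax")
# >>> ivy.sum(x)  # raises IvyInvalidBackendException unless the array
# ...             # was created with dynamic=True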
attribute_dict = {
"unsupported_dtypes",
"supported_dtypes",
"unsupported_devices",
"supported_devices",
"unsupported_device_and_dtype",
"supported_device_and_dtype",
}
attribute_conflict = {
("unsupported_devices", "supported_devices"),
("supported_devices", "unsupported_devices"),
("unsupported_device_and_dtype", "supported_device_and_dtype"),
("supported_device_and_dtype", "unsupported_device_and_dtype"),
}
# TODO see if the globals_getter_func can be hacked to return
# the globals in the module where it is working
def globals_getter_func(x=None):
# define and assign this function to
# ivy.func_wrapper.globals_getter_func in the module
# where you want to use the decorators as a context
# manager
if not x:
return globals()
else:
globals()[x[0]] = x[1]
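# Illustrative setup (a hedged sketch; `my_backend_module` is hypothetical):
# to use the decorator classes below as context managers, assign a
# module-local getter before entering the `with` block.
# # in my_backend_module.py
# >>> import ivy.func_wrapper
# >>> def _local_globals_getter(x=None):
# ...     if not x:
# ...         return globals()
# ...     globals()[x[0]] = x[1]
# >>> ivy.func_wrapper.globals_getter_func = _local_globals_getter
# Functions defined inside e.g. `with with_unsupported_dtypes(...):` in that
# module are then wrapped automatically on `__exit__`.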
class with_unsupported_dtypes(contextlib.ContextDecorator):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.globals = {}
def __call__(self, func=None):
if func:
return (
_dtype_device_wrapper_creator("unsupported_dtypes", tuple)(
*self.args, **self.kwargs
)
)(func)
def __enter__(self):
self.globals = globals_getter_func().copy() # global snapshot
def __exit__(self, *exec):
new_globals = set(globals_getter_func().keys())
diff = new_globals.difference(set(self.globals))
for item in diff:
if globals_getter_func().get(item, None):
if isinstance(globals_getter_func()[item], FunctionType):
# we need to add the decorator
globals_getter_func(
[
item,
(
_dtype_device_wrapper_creator(
"unsupported_dtypes", tuple
)(*self.args, **self.kwargs)
)(globals_getter_func()[item]),
]
)
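# Illustrative decorator usage (a hedged sketch; the version dict is made up
# and `backend_version` is assumed to be the module's version info):
# >>> @with_unsupported_dtypes({"2.2.0 and below": ("float16",)}, backend_version)
# ... def my_op(x):
# ...     ...
# `my_op.unsupported_dtypes` then resolves to ("float16",) for any version at
# or below 2.2.0, recomputed on each attribute access.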
class with_supported_dtypes(contextlib.ContextDecorator):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.globals = {}
def __call__(self, func=None):
if func:
return (
_dtype_device_wrapper_creator("supported_dtypes", tuple)(
*self.args, **self.kwargs
)
)(func)
def __enter__(self):
self.globals = globals_getter_func().copy() # global snapshot
def __exit__(self, *exec):
new_globals = set(globals_getter_func().keys())
diff = new_globals.difference(set(self.globals))
for item in diff:
if globals_getter_func().get(item, None):
if isinstance(globals_getter_func()[item], FunctionType):
# we need to add the decorator
globals_getter_func(
[
item,
(
_dtype_device_wrapper_creator(
"supported_dtypes", tuple
)(*self.args, **self.kwargs)
)(globals_getter_func()[item]),
]
)
class with_unsupported_devices(contextlib.ContextDecorator):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.globals = {}
def __call__(self, func=None):
if func:
return (
_dtype_device_wrapper_creator("unsupported_devices", tuple)(
*self.args, **self.kwargs
)
)(func)
def __enter__(self):
self.globals = globals_getter_func().copy() # global snapshot
def __exit__(self, *exec):
new_globals = set(globals_getter_func().keys())
diff = new_globals.difference(set(self.globals))
for item in diff:
if globals_getter_func().get(item, None):
if isinstance(globals_getter_func()[item], FunctionType):
# we need to add the decorator
globals_getter_func(
[
item,
(
_dtype_device_wrapper_creator(
"unsupported_devices", tuple
)(*self.args, **self.kwargs)
)(globals_getter_func()[item]),
]
)
class with_supported_devices(contextlib.ContextDecorator):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.globals = {}
def __call__(self, func=None):
if func:
return (
_dtype_device_wrapper_creator("supported_devices", tuple)(
*self.args, **self.kwargs
)
)(func)
def __enter__(self):
self.globals = globals_getter_func().copy() # global snapshot
def __exit__(self, *exec):
new_globals = set(globals_getter_func().keys())
diff = new_globals.difference(set(self.globals))
for item in diff:
if globals_getter_func().get(item, None):
if isinstance(globals_getter_func()[item], FunctionType):
# we need to add the decorator
globals_getter_func(
[
item,
(
_dtype_device_wrapper_creator(
"supported_devices", tuple
)(*self.args, **self.kwargs)
)(globals_getter_func()[item]),
]
)
class with_unsupported_device_and_dtypes(contextlib.ContextDecorator):
def __init__(self, *args, **kwargs):
# arg inspection
dicti = args[0]
self.kwargs = kwargs
# iterate through the keys
for key in dicti.keys():
            # build a nested dictionary per device, expanding the "all" key
nested_dic = {}
for nested_key in dicti[key].keys():
if nested_key == "all":
nested_dic["cpu"] = dicti[key].get("cpu", ()) + tuple(
dicti[key]["all"]
)
nested_dic["tpu"] = dicti[key].get("tpu", ()) + tuple(
dicti[key]["all"]
)
nested_dic["gpu"] = dicti[key].get("gpu", ()) + tuple(
dicti[key]["all"]
)
else:
nested_dic[nested_key] = tuple(dicti[key][nested_key])
dicti[key] = nested_dic
args = (dicti, args[1])
self.args = args
self.globals = {}
def __call__(self, func=None):
if func:
return (
_dtype_device_wrapper_creator("unsupported_device_and_dtype", tuple)(
*self.args, **self.kwargs
)
)(func)
def __enter__(self):
self.globals = globals_getter_func().copy() # global snapshot
def __exit__(self, *exec):
new_globals = set(globals_getter_func().keys())
diff = new_globals.difference(set(self.globals.keys()))
for item in diff:
if globals_getter_func().get(item, None):
if isinstance(globals_getter_func()[item], FunctionType):
# we need to add the decorator
globals_getter_func(
[
item,
(
_dtype_device_wrapper_creator(
"unsupported_device_and_dtype", tuple
)(*self.args, **self.kwargs)
)(globals_getter_func()[item]),
]
)
class with_supported_device_and_dtypes(contextlib.ContextDecorator):
def __init__(self, *args, **kwargs):
# arg inspection
dicti = args[0]
self.kwargs = kwargs
# iterate through the keys
for key in dicti.keys():
            # build a nested dictionary per device, expanding the "all" key
nested_dic = {}
for nested_key in dicti[key].keys():
if nested_key == "all":
nested_dic["cpu"] = dicti[key].get("cpu", ()) + tuple(
dicti[key]["all"]
)
nested_dic["tpu"] = dicti[key].get("tpu", ()) + tuple(
dicti[key]["all"]
)
nested_dic["gpu"] = dicti[key].get("gpu", ()) + tuple(
dicti[key]["all"]
)
else:
nested_dic[nested_key] = tuple(dicti[key][nested_key])
dicti[key] = nested_dic
args = (dicti, args[1])
self.args = args
self.globals = {}
def __call__(self, func=None):
if func:
return (
_dtype_device_wrapper_creator("supported_device_and_dtype", tuple)(
*self.args, **self.kwargs
)
)(func)
def __enter__(self):
self.globals = globals_getter_func().copy() # global snapshot
def __exit__(self, *exec):
new_globals = set(globals_getter_func().keys())
diff = new_globals.difference(set(self.globals))
for item in diff:
if globals_getter_func().get(item, None):
if isinstance(globals_getter_func()[item], FunctionType):
# we need to add the decorator
globals_getter_func(
[
item,
(
_dtype_device_wrapper_creator(
"supported_device_and_dtype", tuple
)(*self.args, **self.kwargs)
)(globals_getter_func()[item]),
]
)
class override(contextlib.ContextDecorator):
def __call__(self, func=None):
if func:
setattr(func, "override", "override")
return func
def __enter__(self):
self.globals = globals_getter_func().copy() # global snapshot
def __exit__(self, *exec):
        new_globals = set(globals_getter_func().keys())
diff = new_globals.difference(set(self.globals))
for item in diff:
if globals_getter_func().get(item, None):
if isinstance(globals_getter_func()[item], FunctionType):
# we need to add the decorator
globals_getter_func([item, "override"])
| ivy/ivy/func_wrapper.py/0 | {
"file_path": "ivy/ivy/func_wrapper.py",
"repo_id": "ivy",
"token_count": 32981
} | 15 |
import operator
from typing import Optional, Union, Tuple, List, Sequence
from numbers import Number
from ivy import (
promote_types_of_inputs,
default_float_dtype,
is_float_dtype,
)
from ivy.func_wrapper import (
with_supported_dtypes,
)
from ivy.functional.backends.jax import JaxArray
import jax.numpy as jnp
import jax.scipy as js
import jax.lax as jlax
from .. import backend_version
jax_ArrayLike = Union[JaxArray, Number]
def amax(
x: JaxArray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
axis = tuple(axis) if isinstance(axis, list) else axis
ret = jnp.amax(a=jnp.asarray(x), axis=axis, keepdims=keepdims)
return jnp.asarray(ret) if jnp.isscalar(ret) else ret
def amin(
x: JaxArray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
axis = tuple(axis) if isinstance(axis, list) else axis
ret = jnp.amin(a=jnp.asarray(x), axis=axis, keepdims=keepdims)
return jnp.asarray(ret) if jnp.isscalar(ret) else ret
def sinc(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:
return jnp.sinc(x)
@with_supported_dtypes(
{"0.4.24 and below": ("float16", "float32", "float64")}, backend_version
)
def lgamma(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:
return jlax.lgamma(x)
def fmax(
x1: JaxArray,
x2: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
x1, x2 = promote_types_of_inputs(x1, x2)
return jnp.fmax(x1, x2)
def float_power(
x1: Union[JaxArray, float, list, tuple],
x2: Union[JaxArray, float, list, tuple],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
x1, x2 = promote_types_of_inputs(x1, x2)
if jnp.any(jnp.iscomplex(x1)) or jnp.any(jnp.iscomplex(x2)):
out_dtype = jnp.complex128
else:
out_dtype = jnp.float64
return jnp.float_power(x1, x2).astype(out_dtype)
def copysign(
x1: jax_ArrayLike,
x2: jax_ArrayLike,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
x1, x2 = promote_types_of_inputs(x1, x2)
if not is_float_dtype(x1):
x1 = x1.astype(default_float_dtype(as_native=True))
x2 = x2.astype(default_float_dtype(as_native=True))
return jnp.copysign(x1, x2)
def count_nonzero(
a: JaxArray,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
dtype: Optional[jnp.dtype] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axis, list):
axis = tuple(axis)
if dtype is None:
return jnp.count_nonzero(a, axis=axis, keepdims=keepdims)
return jnp.array(jnp.count_nonzero(a, axis=axis, keepdims=keepdims), dtype=dtype)
def nansum(
x: JaxArray,
/,
*,
axis: Optional[Union[Tuple[int, ...], int]] = None,
dtype: Optional[jnp.dtype] = None,
keepdims: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
if isinstance(axis, list):
axis = tuple(axis)
return jnp.nansum(x, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
def isclose(
a: JaxArray,
b: JaxArray,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
def signbit(
x: Union[JaxArray, float, int, list, tuple],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.signbit(x)
def hypot(
x1: JaxArray,
x2: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.hypot(x1, x2)
def allclose(
x1: JaxArray,
x2: JaxArray,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[JaxArray] = None,
) -> bool:
return jnp.allclose(x1, x2, rtol=rtol, atol=atol, equal_nan=equal_nan)
def diff(
x: JaxArray,
/,
*,
n: int = 1,
axis: int = -1,
prepend: Optional[Union[JaxArray, int, float, list, tuple]] = None,
append: Optional[Union[JaxArray, int, float, list, tuple]] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
x = jnp.asarray(x)
if isinstance(prepend, (list, tuple)):
prepend = jnp.asarray(prepend)
if isinstance(append, (list, tuple)):
append = jnp.asarray(append)
return jnp.diff(x, n=n, axis=axis, prepend=prepend, append=append)
def fix(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.fix(x, out=out)
def nextafter(
x1: JaxArray,
x2: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.nextafter(x1, x2)
def zeta(
x: JaxArray,
q: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
temp = jnp.logical_and(jnp.greater(x, 0), jnp.equal(jnp.remainder(x, 2), 0))
temp = jnp.logical_and(temp, jnp.less_equal(q, 0))
temp = jnp.logical_and(temp, jnp.equal(jnp.remainder(q, 1), 0))
inf_indices = jnp.logical_or(temp, jnp.equal(x, 1))
temp = jnp.logical_and(jnp.not_equal(jnp.remainder(x, 2), 0), jnp.greater(x, 1))
temp = jnp.logical_and(temp, jnp.less_equal(q, 0))
nan_indices = jnp.logical_or(temp, jnp.less(x, 1))
ret = js.special.zeta(x, q)
ret = ret.at[nan_indices].set(jnp.nan)
ret = ret.at[inf_indices].set(jnp.inf)
return ret
# def gradient(
# x: JaxArray,
# /,
# *,
# spacing: Optional[Union[int, list, tuple]] = 1,
# axis: Optional[Union[int, list, tuple]] = None,
# edge_order: Optional[int] = 1,
# ) -> Union[JaxArray, List[JaxArray]]:
# if type(spacing) == int:
# return jnp.gradient(x, spacing, axis=axis)
# return jnp.gradient(x, *spacing, axis=axis)
def _normalize_axis_index(ax: int, ndim: int) -> int:
if ax >= ndim or ax < -ndim:
raise ValueError("axis index is out of range")
return (ax + ndim) % ndim
def _normalize_axis_tuple(axis: Union[int, list, tuple], ndim: int) -> Tuple[int, ...]:
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
axis = tuple(_normalize_axis_index(ax, ndim) for ax in axis)
if len(set(axis)) != len(axis):
raise ValueError("repeated axis")
return axis
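# Illustrative behaviour (a hedged sketch): negative axes are wrapped and
# duplicates are rejected.
# >>> _normalize_axis_tuple(-1, 3)
# (2,)
# >>> _normalize_axis_tuple((0, -3), 3)  # raises ValueError("repeated axis")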
def gradient(
x: JaxArray,
/,
*,
spacing: Union[int, list, tuple] = 1,
axis: Optional[Union[int, list, tuple]] = None,
edge_order: int = 1,
) -> Union[JaxArray, List[JaxArray]]:
f = jnp.asarray(x)
N = f.ndim # number of dimensions
if axis is None:
axes = tuple(range(N))
else:
axes = _normalize_axis_tuple(axis, N)
len_axes = len(axes)
n = (
-1
if spacing is None
else (0 if type(spacing) in (int, float) else len(spacing))
)
if n == -1:
# no spacing argument - use 1 in all axes
dx = [1.0] * len_axes
    elif n == 0:
        # scalar spacing argument - use it in all axes
        dx = [spacing] * len_axes
elif n == 1 and jnp.ndim(spacing[0]) == 0:
# single scalar for all axes
dx = spacing * len_axes
elif n == len_axes:
# scalar or 1d array for each axis
dx = list(spacing)
for i, distances in enumerate(dx):
distances = jnp.asarray(distances)
if distances.ndim == 0:
continue
elif distances.ndim != 1:
raise ValueError("distances must be either scalars or 1d")
if len(distances) != f.shape[axes[i]]:
raise ValueError(
"when 1d, distances must match "
"the length of the corresponding dimension"
)
if jnp.issubdtype(distances.dtype, jnp.integer):
                # Convert integer types to float64 to avoid modular
                # arithmetic in jnp.diff(distances).
distances = distances.astype(jnp.float64)
diffx = jnp.diff(distances)
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
if (diffx == diffx[0]).all():
diffx = diffx[0]
dx[i] = diffx
else:
raise TypeError("invalid number of arguments")
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)] * N
slice2 = [slice(None)] * N
slice3 = [slice(None)] * N
slice4 = [slice(None)] * N
otype = f.dtype
if jnp.issubdtype(otype, jnp.integer):
f = f.astype(jnp.float64)
for axis, ax_dx in zip(axes, dx):
if f.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required."
)
# result allocation
out = jnp.empty_like(f, dtype=otype)
# spacing for the current axis
uniform_spacing = jnp.ndim(ax_dx) == 0
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
slice2[axis] = slice(None, -2)
slice3[axis] = slice(1, -1)
slice4[axis] = slice(2, None)
if uniform_spacing:
out = out.at[tuple(slice1)].set(
(f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * ax_dx)
)
else:
dx1 = ax_dx[0:-1]
dx2 = ax_dx[1:]
a = -(dx2) / (dx1 * (dx1 + dx2))
b = (dx2 - dx1) / (dx1 * dx2)
c = dx1 / (dx2 * (dx1 + dx2))
            # fix the shape for broadcasting along the current axis; a plain
            # Python list keeps the shape static
            shape = [1] * N
            shape[axis] = -1
            a = jnp.reshape(a, shape)
            b = jnp.reshape(b, shape)
            c = jnp.reshape(c, shape)
# 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
out = out.at[tuple(slice1)].set(
a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
)
# Numerical differentiation: 1st order edges
if edge_order == 1:
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
dx_0 = ax_dx if uniform_spacing else ax_dx[0]
# 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
out = out.at[tuple(slice1)].set(
(f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
)
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = ax_dx if uniform_spacing else ax_dx[-1]
# 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
out = out.at[tuple(slice1)].set(
(f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
)
# Numerical differentiation: 2nd order edges
else:
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
if uniform_spacing:
a = -1.5 / ax_dx
b = 2.0 / ax_dx
c = -0.5 / ax_dx
else:
dx1 = ax_dx[0]
dx2 = ax_dx[1]
a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = -dx1 / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
out = out.at[tuple(slice1)].set(
a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
)
slice1[axis] = -1
slice2[axis] = -3
slice3[axis] = -2
slice4[axis] = -1
if uniform_spacing:
a = 0.5 / ax_dx
b = -2.0 / ax_dx
c = 1.5 / ax_dx
else:
dx1 = ax_dx[-2]
dx2 = ax_dx[-1]
a = (dx2) / (dx1 * (dx1 + dx2))
b = -(dx2 + dx1) / (dx1 * dx2)
c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
out = out.at[tuple(slice1)].set(
a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
)
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len_axes == 1:
return outvals[0]
else:
return outvals
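# Illustrative usage (a hedged sketch): the derivative of f(x) = x**2 sampled
# on a non-uniform grid; interior points use second-order central differences,
# so they are exact for a quadratic.
# >>> xs = jnp.array([0.0, 1.0, 1.5, 3.5])
# >>> gradient(xs**2, spacing=(xs,))
# Array([1., 2., 3., 5.], dtype=float32)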
def xlogy(x: JaxArray, y: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:
x, y = promote_types_of_inputs(x, y)
return js.special.xlogy(x, y)
def conj(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.conj(x)
def ldexp(
x1: JaxArray, x2: Union[JaxArray, int], /, *, out: Optional[JaxArray] = None
) -> JaxArray:
return jnp.ldexp(x1, x2)
def frexp(
x: JaxArray, /, *, out: Optional[Tuple[JaxArray, JaxArray]] = None
) -> Tuple[JaxArray, JaxArray]:
return jnp.frexp(x)
def modf(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return jnp.modf(x)
def digamma(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return js.special.digamma(x)
def erfc(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return js.special.erfc(x)
def erfinv(
x: JaxArray,
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
return js.special.erfinv(x)
| ivy/ivy/functional/backends/jax/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 7192
} | 16 |
"""Collection of Jax gradient functions, wrapped to fit Ivy syntax and
signature."""
# global
import jax
import jax.lax as jlax
from ivy.functional.backends.jax import JaxArray, NativeArray
from typing import Optional, Callable, Sequence, Union, Tuple
# local
import ivy
from ivy.functional.ivy.gradients import (
_get_required_float_variables,
_get_y_and_ret_idxs,
_get_native_variables_and_indices,
_set_duplicates,
_process_func_ret_and_grads,
)
# ToDo: modify these functions to track whether variable() has been called
def variable(x, /):
return x
def is_variable(x, /, *, exclusive=False):
if exclusive:
return False
return isinstance(x, NativeArray)
def variable_data(x: JaxArray, /) -> JaxArray:
return x
def _forward_fn(
xs, x, func, duplicate_index_chains, xs_grad_idxs=None, ret_grad_idxs=None
):
"""Forward function for gradient calculation."""
# Setting x(relevant variables) into xs(all variables)
x = ivy.nested_map(ivy.to_ivy, x, include_derived=True)
x_arr_idxs = ivy.nested_argwhere(x, ivy.is_array)
x_arr_values = ivy.multi_index_nest(x, x_arr_idxs)
if xs_grad_idxs is not None:
xs_grad_arr_idxs = []
for grad_idx in xs_grad_idxs:
xs_grad_arr_idx = ivy.nested_argwhere(
ivy.index_nest(xs, grad_idx), ivy.is_array
)
for idx in xs_grad_arr_idx:
xs_grad_arr_idxs.append(list(grad_idx) + idx)
ivy.set_nest_at_indices(xs, xs_grad_arr_idxs, x_arr_values)
elif ivy.is_array(xs):
xs = x
else:
xs_arr_idxs = ivy.nested_argwhere(xs, lambda x: ivy.is_array(x))
ivy.set_nest_at_indices(xs, xs_arr_idxs, x_arr_values)
# Setting duplicates to ensure same references as in the original input
if not ivy.is_array(xs):
xs = _set_duplicates(xs, duplicate_index_chains)
ret = func(xs)
# Getting the relevant outputs from the function return for gradient calculation
_, ret_values = _get_native_variables_and_indices(ret, idxs=ret_grad_idxs)
if isinstance(ret_values, list) and len(ret_values) == 1 and ret_grad_idxs is None:
ret_values = ret_values[0]
return ret_values
def execute_with_gradients(
func,
xs: JaxArray,
/,
*,
retain_grads: bool = False,
xs_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
ret_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
):
# Conversion of required arrays to float variables and duplicate index chains
(
xs,
xs_grad_idxs,
xs_required,
required_duplicate_index_chains,
duplicate_index_chains,
) = _get_required_float_variables(xs, xs_grad_idxs)
func_ret = func(xs)
# Getting the relevant outputs from the function return for gradient calculation
ret_grad_idxs, y, ret_idxs = _get_y_and_ret_idxs(func_ret, ret_grad_idxs)
if isinstance(y, ivy.NativeArray):
# Gradient calculation for a single output
grad_fn = jax.grad(
lambda x: _forward_fn(
xs,
x,
func,
duplicate_index_chains,
xs_grad_idxs=xs_grad_idxs,
ret_grad_idxs=ret_grad_idxs,
)
)
grads = _set_duplicates(grad_fn(xs_required), required_duplicate_index_chains)
else:
# Gradient calculation for multiple outputs
grad_fn = jax.jacrev(
lambda x: _forward_fn(
xs,
x,
func,
duplicate_index_chains,
xs_grad_idxs=xs_grad_idxs,
ret_grad_idxs=ret_grad_idxs,
)
)
grads_ = grad_fn(xs_required)
grads = grads_
if isinstance(ret_idxs, list) and len(ret_idxs):
grads = {
ret_idxs[i]: _set_duplicates(grad, required_duplicate_index_chains)
for i, grad in enumerate(grads_)
}
return _process_func_ret_and_grads(func_ret, grads, retain_grads)
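# Illustrative usage (a hedged sketch; this backend function is normally
# reached through the ivy.execute_with_gradients API):
# >>> fn = lambda xs: (xs ** 2).sum()
# >>> y, grads = execute_with_gradients(fn, ivy.array([1.0, 2.0]))
# `y` is fn(x) == 5.0 and `grads` holds 2 * x == [2., 4.] for the default
# input/output index chains.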
def value_and_grad(func):
def grad_fn(xs):
return ivy.to_native(func(xs))
def callback_fn(xs):
xs = ivy.nested_map(lambda x: ivy.to_native(x), xs, include_derived=True)
value, grad = jax.value_and_grad(grad_fn)(xs)
return ivy.to_ivy(value), ivy.to_ivy(grad)
return callback_fn
def stop_gradient(
x: JaxArray, /, *, preserve_type: bool = True, out: Optional[JaxArray] = None
) -> JaxArray:
return jlax.stop_gradient(x)
def jac(func: Callable):
def grad_fn(x_in):
return ivy.to_native(
func(ivy.to_ivy(x_in, nested=True)), nested=True, include_derived=True
)
def callback_fn(x_in):
return ivy.to_ivy(
jax.jacfwd(grad_fn)(ivy.to_native(x_in, nested=True)),
nested=True,
include_derived=True,
)
return callback_fn
def grad(func: Callable, argnums: Union[int, Tuple[int]] = 0):
def grad_fn(x_in):
return ivy.to_native(func(x_in))
def callback_fn(x_in):
return ivy.to_ivy(jax.grad(grad_fn, argnums)(ivy.to_native(x_in)))
return callback_fn
| ivy/ivy/functional/backends/jax/gradients.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/gradients.py",
"repo_id": "ivy",
"token_count": 2490
} | 17 |
import mxnet as mx
from typing import Optional, Union, Sequence, List
import numpy as np
import ivy
from ivy.functional.ivy.data_type import _handle_nestable_dtype_info
from ivy.utils.exceptions import IvyNotImplementedException
ivy_dtype_dict = {
np.dtype("int8"): "int8",
np.dtype("int32"): "int32",
np.dtype("int64"): "int64",
np.dtype("uint8"): "uint8",
np.dtype("float16"): "float16",
np.dtype("float32"): "float32",
np.dtype("float64"): "float64",
np.dtype("bool"): "bool",
np.int8: "int8",
np.int32: "int32",
np.int64: "int64",
np.uint8: "uint8",
np.float16: "float16",
np.float32: "float32",
np.float64: "float64",
np.bool_: "bool",
}
native_dtype_dict = {
"int8": np.int8,
"int32": np.int32,
"int64": np.int64,
"uint8": np.uint8,
"float16": np.float16,
"float32": np.float32,
"float64": np.float64,
"bool": np.bool_,
}
char_rep_dtype_dict = {
"?": "bool",
"i": int,
"i1": "int8",
"i4": "int32",
"i8": "int64",
"f": float,
"f2": "float16",
"f4": "float32",
"f8": "float64",
"u1": "uint8",
}
class Finfo:
def __init__(self, mx_finfo: mx.np.finfo):
self._mx_finfo = mx_finfo
def __repr__(self):
return repr(self._mx_finfo)
@property
def bits(self):
return self._mx_finfo.bits
@property
def eps(self):
return float(self._mx_finfo.eps)
@property
def max(self):
return float(self._mx_finfo.max)
@property
def min(self):
return float(self._mx_finfo.min)
@property
def smallest_normal(self):
return float(self._mx_finfo.tiny)
class Bfloat16Finfo:
def __init__(self, mx_finfo: mx.np.finfo):
self._mx_finfo = mx_finfo
def __repr__(self):
return repr(self._mx_finfo)
def astype(
x: Union[(None, mx.ndarray.NDArray)],
dtype: Union[(None, str)],
/,
*,
copy: bool = True,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
dtype = ivy.as_native_dtype(dtype)
if x.dtype == dtype:
return mx.nd.copy(x) if copy else x
return x.astype(dtype)
def broadcast_arrays(
*arrays: Union[(None, mx.ndarray.NDArray)]
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def broadcast_to(
x: Union[(None, mx.ndarray.NDArray)],
/,
shape: Union[(ivy.NativeShape, Sequence[int])],
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
@_handle_nestable_dtype_info
def finfo(type: Union[str, mx.ndarray.NDArray, np.dtype], /) -> Finfo:
if isinstance(type, mx.ndarray.NDArray):
type = type.dtype
return Finfo(mx.np.finfo(ivy.as_native_dtype(type)))
@_handle_nestable_dtype_info
def iinfo(type: Union[str, mx.ndarray.NDArray, np.dtype], /) -> np.iinfo:
    # using np.iinfo since mxnet uses numpy dtypes and does not provide iinfo
if isinstance(type, mx.ndarray.NDArray):
type = type.asnumpy().dtype
return np.iinfo(ivy.as_native_dtype(type))
def result_type(*arrays_and_dtypes: Union[(None, mx.ndarray.NDArray)]) -> ivy.Dtype:
raise IvyNotImplementedException()
def as_ivy_dtype(
dtype_in: Union[(str, int, float, complex, bool, np.dtype)], /
) -> ivy.Dtype:
if dtype_in is int:
return ivy.default_int_dtype()
if dtype_in is float:
return ivy.default_float_dtype()
if dtype_in is bool:
return ivy.Dtype("bool")
if isinstance(dtype_in, str):
if dtype_in in char_rep_dtype_dict:
return as_ivy_dtype(char_rep_dtype_dict[dtype_in])
if dtype_in in native_dtype_dict:
dtype_str = dtype_in
else:
raise ivy.utils.exceptions.IvyException(
"Cannot convert to ivy dtype."
f" {dtype_in} is not supported by MXNet backend."
)
else:
dtype_str = ivy_dtype_dict[dtype_in]
if "int" in dtype_str:
return ivy.IntDtype(dtype_str)
elif "float" in dtype_str:
return ivy.FloatDtype(dtype_str)
elif "bool" in dtype_str:
return ivy.Dtype("bool")
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot recognize {dtype_str} as a valid Dtype."
)
def as_native_dtype(dtype_in: Union[(None, str, bool, int, float, np.dtype)]) -> None:
if dtype_in is int:
return ivy.default_int_dtype(as_native=True)
if dtype_in is float:
return ivy.default_float_dtype(as_native=True)
if dtype_in is bool:
return np.dtype("bool")
if not isinstance(dtype_in, str):
return dtype_in
if dtype_in in char_rep_dtype_dict:
return as_native_dtype(char_rep_dtype_dict[dtype_in])
if dtype_in in native_dtype_dict:
return native_dtype_dict[ivy.Dtype(dtype_in)]
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot convert to MXNet dtype. {dtype_in} is not supported by MXNet."
)
def dtype(
x: Union[(None, mx.ndarray.NDArray, np.ndarray)], *, as_native: bool = False
) -> ivy.Dtype:
if as_native:
return ivy.as_native_dtype(x.dtype)
return as_ivy_dtype(x.dtype)
def dtype_bits(dtype_in: Union[(None, str, np.dtype)], /) -> int:
raise IvyNotImplementedException()
def is_native_dtype(dtype_in: Union[(None, str)], /) -> bool:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/data_type.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/data_type.py",
"repo_id": "ivy",
"token_count": 2617
} | 18 |
from typing import Union, Optional, Tuple
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
def unravel_index(
indices: Union[(None, mx.ndarray.NDArray)],
shape: Tuple[int],
/,
*,
out: Optional[Tuple[Union[(None, mx.ndarray.NDArray)]]] = None,
) -> Tuple[None]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/experimental/searching.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/searching.py",
"repo_id": "ivy",
"token_count": 136
} | 19 |
from typing import Union, Optional, Sequence
import mxnet as mx
from numbers import Number
# local
from ivy.utils.exceptions import IvyNotImplementedException
import ivy
def min(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def max(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def mean(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def prod(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
dtype: Optional[None] = None,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = x.dtype
if dtype != x.dtype and not ivy.is_bool_dtype(x):
x = x.astype(dtype)
if axis is None:
num_dims = len(x.shape)
axis = tuple(range(num_dims))
elif isinstance(axis, Number):
axis = (axis,)
elif isinstance(axis, list):
axis = tuple(axis)
return mx.nd.prod(x, axis=axis, keepdims=keepdims).astype(dtype)
def std(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
correction: Union[(int, float)] = 0.0,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def sum(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
dtype: Optional[None] = None,
keepdims: Optional[bool] = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = x.dtype
if dtype != x.dtype and not ivy.is_bool_dtype(x):
x = x.astype(dtype)
if axis is None:
num_dims = len(x.shape)
axis = tuple(range(num_dims))
elif isinstance(axis, Number):
axis = (axis,)
elif isinstance(axis, list):
axis = tuple(axis)
return mx.nd.sum(x, axis=axis, keepdims=keepdims).astype(dtype)
def var(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: Optional[Union[(int, Sequence[int])]] = None,
correction: Union[(int, float)] = 0.0,
keepdims: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def cumprod(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def cumsum(
x: Union[(None, mx.ndarray.NDArray)],
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
*,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def einsum(
equation: str,
*operands: Union[(None, mx.ndarray.NDArray)],
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/statistical.py",
"repo_id": "ivy",
"token_count": 1789
} | 20 |
# global
from typing import Union, Callable, Sequence
import numpy as np
# local
from . import backend_version
from ivy import with_unsupported_dtypes
@with_unsupported_dtypes({"1.26.3 and below": ("complex",)}, backend_version)
def reduce(
operand: np.ndarray,
init_value: Union[int, float],
computation: Callable,
/,
*,
axes: Union[int, Sequence[int]] = 0,
keepdims: bool = False,
) -> np.ndarray:
axes = (
(axes,)
if isinstance(axes, int)
else tuple(axes) if isinstance(axes, list) else axes
)
reduced_func = np.frompyfunc(computation, 2, 1).reduce
op_dtype = operand.dtype
for axis in axes:
operand = reduced_func(operand, axis=axis, initial=init_value, keepdims=True)
if not keepdims:
operand = np.squeeze(operand, axis=axes)
return operand.astype(op_dtype)
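# Illustrative usage (a hedged sketch):
# >>> import operator
# >>> x = np.array([[1, 2, 3], [4, 5, 6]])
# >>> reduce(x, 0, operator.add, axes=1)
# array([ 6, 15])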
| ivy/ivy/functional/backends/numpy/experimental/general.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/general.py",
"repo_id": "ivy",
"token_count": 357
} | 21 |
"""Collection of NumPy gradient functions, wrapped to fit Ivy syntax and
signature."""
# global
import logging
from typing import Sequence, Union
import ivy
def variable(x, /):
logging.warning(
"NumPy does not support autograd, declaring a 'variable' "
"is identical to declaring an 'array' when using numpy backend."
)
return x
def is_variable(x, /, *, exclusive=False):
    # NumPy does not support autograd; checking if x is a variable does not
    # have any meaning for NumPy. Return False.
return False
def variable_data(x, /):
return x
def execute_with_gradients(
func,
xs,
/,
*,
retain_grads: bool = False,
xs_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
ret_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
):
logging.warning(
"NumPy does not support autograd, "
"'execute_with_gradients' returns None in place of function gradients."
)
xs = ivy.to_ivy(xs)
func_ret = func(xs)
return func_ret, None
def value_and_grad(func):
logging.warning(
"NumPy does not support autograd, 'value_and_grad' "
"has no effect on the array, as gradients are not supported in the first place."
)
def grad_fn(xs):
grads = ivy.nested_map(
lambda x: ivy.zeros_like(x), xs, include_derived=True, shallow=False
)
y = func(xs)
y = ivy.to_ivy(y)
return y, grads
return grad_fn
def jac(func):
logging.warning(
"NumPy does not support autograd, 'jac' "
"has no effect on the array, as gradients are not supported in the first place."
)
def grad_fn(xs):
jacobian = ivy.nested_map(
lambda x: ivy.zeros_like(x), xs, include_derived=True, shallow=False
)
return jacobian
return grad_fn
def grad(func, argnums=0):
logging.warning(
"NumPy does not support autograd, 'grad' "
"has no effect on the array, as gradients are not supported in the first place."
)
def grad_fn(xs):
grad = ivy.nested_map(
lambda x: ivy.zeros_like(x), xs, include_derived=True, shallow=False
)
y = func(xs)
y = ivy.to_ivy(y)
return grad
return grad_fn
def stop_gradient(x, /, *, preserve_type=True, out=None):
logging.warning(
"NumPy does not support autograd, 'stop_gradient' "
"has no effect on the array, as gradients are not supported in the first place."
)
return x
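if __name__ == "__main__":
    # Illustrative smoke test (assumes ivy's numpy backend is active): the
    # "gradients" returned under this backend are always zero-filled
    # placeholders, while the function value passes through unchanged.
    import numpy as np

    _fn = value_and_grad(lambda x: ivy.sum(x**2))
    _y, _grads = _fn(ivy.array([1.0, 2.0]))
    assert float(_y) == 5.0
    assert np.allclose(ivy.to_numpy(_grads), 0.0)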
| ivy/ivy/functional/backends/numpy/gradients.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/gradients.py",
"repo_id": "ivy",
"token_count": 1066
} | 22 |
# global
import struct
from numbers import Number
from typing import Union, List, Optional, Sequence, Tuple
import numpy as np
import paddle
import ivy.functional.backends.paddle as paddle_backend
# local
import ivy
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_supported_device_and_dtypes,
)
from ivy.functional.ivy.creation import (
_asarray_to_native_arrays_and_back,
_asarray_infer_device,
_asarray_handle_nestable,
_asarray_infer_dtype,
NestedSequence,
SupportsBufferProtocol,
_asarray_inputs_to_native_shapes,
_remove_np_bfloat16,
)
from . import backend_version
from paddle.device import core
# Array API Standard #
# -------------------#
def arange(
start: float,
/,
stop: Optional[float] = None,
step: float = 1,
*,
dtype: Optional[Union[ivy.Dtype, paddle.dtype]] = None,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if stop is None:
stop = start
start = 0
if (step > 0 and start > stop) or (step < 0 and start < stop):
if isinstance(stop, float):
stop = float(start)
else:
stop = start
if dtype is None:
if isinstance(start, int) and isinstance(stop, int) and isinstance(step, int):
return paddle.arange(start, stop, step, dtype=paddle.int32)
elif (
isinstance(start, float)
or isinstance(stop, float)
or isinstance(step, float)
):
return paddle.arange(start, stop, step, dtype=paddle.float32)
else:
return paddle.arange(start, stop, step)
else:
return paddle.arange(start, stop, step).cast(dtype)
@_asarray_to_native_arrays_and_back
@_asarray_infer_device
@_asarray_handle_nestable
@_asarray_inputs_to_native_shapes
@_asarray_infer_dtype
def asarray(
obj: Union[
paddle.Tensor,
np.ndarray,
bool,
int,
float,
list,
NestedSequence,
SupportsBufferProtocol,
],
/,
*,
copy: Optional[bool] = None,
dtype: Optional[Union[ivy.Dtype, paddle.dtype]] = None,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if isinstance(obj, paddle.Tensor):
if copy:
# Checking if the tensor is not empty
# As clone is not supported for empty tensors
if all(obj.shape):
ret = obj.clone().detach()
ret.stop_gradient = obj.stop_gradient
else:
ret = paddle.to_tensor(
obj.detach(),
dtype=dtype,
place=device,
stop_gradient=obj.stop_gradient,
)
else:
ret = obj
        ret = ret.astype(dtype) if dtype is not None and ret.dtype != dtype else ret
return paddle_backend.to_device(ret, device)
elif isinstance(obj, (Number, bool, complex)):
ret = paddle.to_tensor(obj, dtype=dtype, place=device)
if ret.ndim != 0: # for versions <2.5.0
return ret.squeeze()
else:
return ret
obj = ivy.nested_map(_remove_np_bfloat16, obj, shallow=False)
return paddle.to_tensor(obj, dtype=dtype, place=device)
def empty(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if isinstance(shape, int):
shape = [shape]
return paddle.empty(shape=shape).cast(dtype)
def empty_like(
x: paddle.Tensor,
/,
*,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.empty(shape=x.shape).cast(dtype)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"uint8",
"int8",
"int16",
"float16",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
batch_shape: Optional[Union[int, Sequence[int]]] = None,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if n_cols is None:
n_cols = n_rows
if batch_shape is None:
batch_shape = []
i = paddle.eye(n_rows, n_cols, dtype=dtype)
reshape_dims = [1] * len(batch_shape) + [n_rows, n_cols]
tile_dims = list(batch_shape) + [1, 1]
# handle index of the diagonal k
if k == 0:
return paddle.reshape(i, reshape_dims)
elif -n_rows < k < 0:
mat = paddle.concat(
[
paddle.zeros([-k, n_cols], dtype=dtype),
i[: n_rows + k],
],
0,
)
return paddle.tile(paddle.reshape(mat, reshape_dims), tile_dims)
elif 0 < k < n_cols:
mat = paddle.concat(
[
paddle.zeros([n_rows, k], dtype=dtype),
i[:, : n_cols - k],
],
1,
)
return paddle.tile(paddle.reshape(mat, reshape_dims), tile_dims)
else:
return paddle.zeros(batch_shape + [n_rows, n_cols], dtype=dtype)
def to_dlpack(x, /, *, out: Optional[paddle.Tensor] = None):
return paddle.utils.dlpack.to_dlpack(x)
def from_dlpack(x, /, *, out: Optional[paddle.Tensor] = None):
if hasattr(x, "__dlpack__"):
capsule = x.__dlpack__()
else:
capsule = x
return paddle.utils.dlpack.from_dlpack(capsule)
def full(
shape: Union[ivy.NativeShape, Sequence[int]],
fill_value: Union[int, float, bool],
*,
dtype: Optional[Union[ivy.Dtype, paddle.dtype]] = None,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if dtype is None:
dtype = ivy.default_dtype(item=fill_value)
if not isinstance(shape, Sequence):
shape = [shape]
if isinstance(fill_value, complex):
fill_value = paddle.to_tensor(fill_value)
ret_real = paddle.full(shape=shape, fill_value=fill_value.real())
ret_imag = paddle.full(shape=shape, fill_value=fill_value.imag())
ret = paddle.complex(ret_real, ret_imag)
else:
dtype_ = None if ivy.as_native_dtype(dtype) == paddle.int8 else dtype
ret = paddle.full(shape=shape, fill_value=fill_value, dtype=dtype_)
if ret.dtype != ivy.as_native_dtype(dtype):
return ret.cast(dtype)
return ret
def full_like(
x: paddle.Tensor,
/,
fill_value: Number,
*,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle_backend.full(
shape=x.shape, fill_value=fill_value, dtype=dtype, device=device
)
def _linspace_helper(start, stop, num, axis=None, *, dtype=None):
num = num.detach().item() if isinstance(num, paddle.Tensor) else num
start_is_array = isinstance(start, paddle.Tensor)
stop_is_array = isinstance(stop, paddle.Tensor)
linspace_method = paddle.linspace
sos_shape = []
if start_is_array:
start_shape = start.shape
sos_shape = start_shape
if num == 1:
if axis is not None:
return paddle_backend.expand_dims(start, axis=axis)
else:
return paddle_backend.expand_dims(start, axis=-1)
start = start.reshape((-1,))
linspace_method = (
_differentiable_linspace if not start.stop_gradient else paddle.linspace
)
if stop_is_array:
stop_shape = stop.shape
sos_shape = stop_shape
if num == 1:
return (
paddle_backend.ones(
stop_shape[:axis] + [1] + stop_shape[axis:], dtype=dtype
)
* start
)
stop = stop.reshape((-1,))
linspace_method = (
_differentiable_linspace if not stop.stop_gradient else paddle.linspace
)
if start_is_array and stop_is_array:
if num < start.shape[0]:
start = paddle_backend.expand_dims(start, axis=-1)
stop = paddle_backend.expand_dims(stop, axis=-1)
diff = paddle_backend.subtract(stop, start)
inc = diff / (num - 1)
res = [start]
res += [start + inc * i for i in range(1, num - 1)]
res.append(stop)
else:
res = [
linspace_method(strt, stp, num)
for strt, stp in zip(
paddle_backend.unstack(start, keepdims=True),
paddle_backend.unstack(stop, keepdims=True),
)
]
elif start_is_array and not stop_is_array:
if num < start.shape[0]:
start = paddle_backend.expand_dims(start, axis=axis)
diff = stop - start
inc = diff / (num - 1)
res = [start]
res += [start + inc * i for i in range(1, num - 1)]
res.append(paddle.ones(start.shape).astype(start.dtype) * stop)
else:
res = [linspace_method(strt, stop, num) for strt in start]
elif not start_is_array and stop_is_array:
if num < stop.shape[0]:
stop = paddle_backend.expand_dims(stop, axis=-1)
diff = stop - start
inc = diff / (num - 1)
res = [paddle.ones(stop.shape).astype(stop.dtype) * start]
res += [start + inc * i for i in range(1, num - 1)]
res.append(stop)
else:
res = [linspace_method(start, stp, num) for stp in stop]
else:
return linspace_method(start, stop, num, dtype=dtype)
res = paddle_backend.concat(res, axis=-1).reshape(sos_shape + [num])
if axis is not None:
ndim = res.ndim
perm = list(range(ndim - 1))
perm.insert(axis % (ndim + 1), ndim - 1)
res = paddle_backend.permute_dims(res, perm)
return res
def _differentiable_linspace(start, stop, num, *, dtype=None):
start = ivy.to_native(start)
num = paddle.to_tensor(num, stop_gradient=False)
if num == 1:
return paddle_backend.expand_dims(start, axis=0)
n_m_1 = paddle_backend.subtract(num, 1)
increment = paddle_backend.divide(paddle_backend.subtract(stop, start), n_m_1)
increment_tiled = paddle_backend.repeat(increment, n_m_1)
increments = paddle_backend.multiply(
increment_tiled,
paddle.linspace(1, n_m_1, n_m_1.cast(paddle.int32), dtype=dtype),
)
if isinstance(start, int) or start.ndim == 0:
start = paddle_backend.expand_dims(start, axis=0)
res = paddle_backend.concat((start, paddle_backend.add(start, increments)), axis=0)
return res.cast(dtype)
def _slice_at_axis(sl, axis):
return (slice(None),) * axis + (sl,) + (...,)
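# e.g. _slice_at_axis(slice(None, -1), 2)
#   -> (slice(None), slice(None), slice(None, -1), Ellipsis)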
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("uint16", "bfloat16", "float16")}}, backend_version
)
def linspace(
start: Union[paddle.Tensor, float],
stop: Union[paddle.Tensor, float],
/,
num: int,
*,
axis: Optional[int] = None,
endpoint: bool = True,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
    if not isinstance(start, (paddle.Tensor, int)):
        start = paddle.to_tensor(start)
    if not isinstance(stop, (paddle.Tensor, int)):
        stop = paddle.to_tensor(stop)
if axis is None:
axis = -1
if not endpoint:
if dtype is not None:
ans = _linspace_helper(start, stop, num + 1, axis, dtype=dtype)
else:
ans = _linspace_helper(start, stop, num + 1, axis)
if axis < 0:
axis += len(ans.shape)
ans = paddle_backend.get_item(ans, _slice_at_axis(slice(None, -1), axis))
else:
if dtype is not None:
ans = _linspace_helper(start, stop, num, axis, dtype=dtype)
else:
ans = _linspace_helper(start, stop, num, axis)
if (
endpoint
and ans.shape[0] > 1
and (not isinstance(start, paddle.Tensor))
and (not isinstance(stop, paddle.Tensor))
):
ans[-1] = stop
if (
ans.shape[0] >= 1
and (not isinstance(start, paddle.Tensor))
and (not isinstance(stop, paddle.Tensor))
and ans[0] != start
):
ans[0] = start
if ivy.is_ivy_array(ans):
ans = paddle.to_tensor(ans.data)
if "int" in str(dtype) and paddle.is_floating_point(ans):
ans = paddle.floor(ans)
return ans.cast(dtype)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"float16",
"complex",
"bool",
)
}
},
backend_version,
)
def meshgrid(
*arrays: paddle.Tensor,
sparse: bool = False,
indexing: str = "xy",
out: Optional[paddle.Tensor] = None,
) -> List[paddle.Tensor]:
if len(arrays) == 1:
return arrays
if not sparse:
if indexing == "ij":
return paddle.meshgrid(*arrays)
elif indexing == "xy":
def index_switch(x):
return paddle_backend.swapaxes(x, 0, 1) if x.ndim > 1 else x
arrays = list(map(index_switch, arrays))
ret = paddle.meshgrid(*arrays)
return list(map(index_switch, ret))
else:
raise ValueError(f"indexing must be either 'ij' or 'xy', got {indexing}")
sd = (1,) * len(arrays)
res = [
paddle.reshape(paddle.to_tensor(a), (sd[:i] + (-1,) + sd[i + 1 :]))
for i, a in enumerate(arrays)
]
if indexing == "xy" and len(arrays) > 1:
res[0] = paddle.reshape(res[0], (1, -1) + sd[2:])
res[1] = paddle.reshape(res[1], (-1, 1) + sd[2:])
return res
def ones(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.ones(shape=shape).cast(dtype)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int32",
"int64",
"float64",
"float32",
"complex128",
"complex64",
"bool",
)
}
},
backend_version,
)
def ones_like(
x: paddle.Tensor,
/,
*,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle_backend.ones(shape=x.shape, dtype=dtype, device=device)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"complex",
)
}
},
backend_version,
)
def tril(
x: paddle.Tensor, /, *, k: int = 0, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.tril(x=x, diagonal=k)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"uint8",
"complex",
)
}
},
backend_version,
)
def triu(
x: paddle.Tensor, /, *, k: int = 0, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return paddle.triu(x=x, diagonal=k)
def zeros(
shape: Union[ivy.NativeShape, Sequence[int]],
*,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.zeros(shape=shape).cast(dtype)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("uint8", "int8", "int16", "float16", "bfloat16")}},
backend_version,
)
def zeros_like(
x: paddle.Tensor,
/,
*,
dtype: paddle.dtype,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle_backend.zeros(shape=x.shape, dtype=dtype, device=device)
# Extra #
# ------#
array = asarray
def copy_array(
x: paddle.Tensor,
*,
to_ivy_array: bool = True,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if 0 in x.shape:
new_arr = paddle.empty(x.shape, dtype=x.dtype)
else:
new_arr = x.clone()
if to_ivy_array:
return ivy.to_ivy(new_arr)
return new_arr
def one_hot(
indices: paddle.Tensor,
depth: int,
/,
*,
on_value: Optional[paddle.Tensor] = None,
off_value: Optional[paddle.Tensor] = None,
axis: Optional[int] = None,
dtype: Optional[paddle.dtype] = None,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
on_none = on_value is None
off_none = off_value is None
expand_ret = False
if indices.ndim == 0:
expand_ret = True
indices = indices.cast("int64").unsqueeze(0)
if dtype is None:
if on_none and off_none:
dtype = paddle.float32
else:
if not on_none:
dtype = paddle.to_tensor(on_value).dtype
elif not off_none:
dtype = paddle.to_tensor(off_value).dtype
else:
dtype = ivy.as_native_dtype(dtype)
on_value = (
paddle.to_tensor(1.0, dtype="float32")
if on_none
else paddle.to_tensor(on_value, dtype="float32")
)
off_value = (
paddle.to_tensor(0.0, dtype="float32")
if off_none
else paddle.to_tensor(off_value, dtype="float32")
)
res = paddle.nn.functional.one_hot(indices.cast(paddle.int64), depth)
if not on_none or not off_none:
res = paddle.where(res == 1, on_value, off_value)
if axis is not None:
res = paddle.moveaxis(res, -1, axis)
if expand_ret:
res = res.squeeze()
return res.cast(dtype)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("complex64", "complex128")}},
backend_version,
)
def frombuffer(
buffer: bytes,
dtype: paddle.dtype = float,
count: int = -1,
offset: int = 0,
) -> paddle.Tensor:
dtype_bytes = int(ivy.Dtype(dtype).dtype_bits / 8)
if str(dtype) == "bool":
dtype_bytes = 1
dtype_str = str(dtype)
struct_format = {
"bool": "?",
"int8": "b",
"int16": "h",
"int32": "i",
"int64": "q",
"uint8": "B",
"float16": "e",
"float32": "f",
"float64": "d",
}
ret = []
for i in range(0, len(buffer), dtype_bytes):
x = struct.unpack(struct_format[dtype_str], buffer[i : i + dtype_bytes])
ret = ret + list(x)
if offset > 0:
offset = int(offset / dtype_bytes)
if count > -1:
ret = ret[offset : offset + count]
else:
ret = ret[offset:]
ret = paddle.to_tensor(ret, dtype=dtype)
return ret
def triu_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: core.Place = None,
) -> Tuple[paddle.Tensor]:
# special case due to inconsistent behavior when n_cols=1 and n_rows=0
if n_cols == 1 and n_rows == 0:
return paddle.to_tensor([], place=device, dtype="int64"), paddle.to_tensor(
[], place=device, dtype="int64"
)
return tuple(paddle.triu_indices(n_rows, col=n_cols, offset=k, dtype="int64"))
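if __name__ == "__main__":
    # Illustrative smoke tests (assume a working paddle install and an active
    # paddle backend).
    _r, _c = triu_indices(2, 2, 0)
    assert _r.tolist() == [0, 0, 1] and _c.tolist() == [0, 1, 1]
    _oh = one_hot(paddle.to_tensor([0, 2]), 3)
    assert _oh.tolist() == [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]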
| ivy/ivy/functional/backends/paddle/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/creation.py",
"repo_id": "ivy",
"token_count": 9692
} | 23 |
# global
from typing import Optional
import paddle
import paddle.nn.functional as F
import math
# local
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_supported_device_and_dtypes,
)
from . import backend_version
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float16",
"int8",
"int16",
"int32",
"int64",
"uint8",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def l1_loss(
input: paddle.Tensor,
target: paddle.Tensor,
/,
*,
reduction: Optional[str] = "mean",
) -> paddle.Tensor:
return F.l1_loss(input, target, reduction=reduction)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"int8",
"int16",
"int32",
"int64",
"uint8",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def smooth_l1_loss(
input: paddle.Tensor,
target: paddle.Tensor,
/,
*,
beta: Optional[float] = 1.0,
reduction: Optional[str] = "mean",
) -> paddle.Tensor:
return paddle.nn.functional.smooth_l1_loss(
input, target, reduction=reduction, delta=beta
)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float16",
"int8",
"int16",
"int32",
"int64",
"uint8",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def huber_loss(
input: paddle.Tensor,
target: paddle.Tensor,
/,
*,
delta: Optional[float] = 1.0,
) -> paddle.Tensor:
return paddle.fluid.layers.huber_loss(input, target, delta=delta)
@with_unsupported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float16",
"int8",
"int16",
"int32",
"int64",
"uint8",
"complex64",
"complex128",
"bool",
)
}
},
backend_version,
)
def soft_margin_loss(
input: paddle.Tensor,
label: paddle.Tensor,
/,
*,
reduction: Optional[str] = "mean",
) -> paddle.Tensor:
return paddle.nn.functional.soft_margin_loss(input, label, reduction=reduction)
@with_supported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float32", "float64")}},
backend_version,
)
def kl_div(
input: paddle.Tensor,
target: paddle.Tensor,
/,
*,
reduction: Optional[str] = "mean",
log_target=False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if log_target:
target = paddle.exp(target)
loss = F.kl_div(input, target, reduction=reduction)
return loss
def _apply_loss_reduction(loss: paddle.Tensor, reduction: str) -> paddle.Tensor:
if reduction == "sum":
return paddle.sum(loss)
elif reduction == "mean":
return paddle.mean(loss)
else: # reduction == "none"
return loss
def _validate_poisson_nll_params(
input,
label,
epsilon,
reduction,
allowed_dtypes=[paddle.float32, paddle.float64],
):
# Validate dtypes
for parameter, name in zip([input, label], ["input", "label"]):
if parameter.dtype not in allowed_dtypes:
raise ValueError(
f"The dtype of '{name}' in poisson_nll_loss should be one of"
f" {allowed_dtypes}, but received {parameter.dtype}."
)
# Validate epsilon
if epsilon <= 0:
raise ValueError(
"The value of `epsilon` in poisson_nll_loss should be positive, but"
f" received {epsilon}, which is not allowed."
)
# Validate reduction
if reduction not in ["sum", "mean", "none"]:
raise ValueError(
"The value of 'reduction' in poisson_nll_loss should be 'sum', 'mean' or"
f" 'none', but received {reduction}, which is not allowed."
)
# Validate shape
if input.shape != label.shape:
raise ValueError(
f"The shape of 'input' ({input.shape}) must be the same as the shape of"
f" 'label' ({label.shape})."
)
return True
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
# Note: This is a composition function to address an issue with the native
# `paddle.nn.functional.poisson_nll_loss` function. Once PaddlePaddle moves the
# changes from the develop branch to a stable release, this function can be replaced
# by the native implementation.
# Refer to the PR for more details: https://github.com/PaddlePaddle/Paddle/pull/56992
def poisson_nll_loss(
input: paddle.Tensor,
target: paddle.Tensor,
*,
log_input: bool = True,
full: bool = False,
eps: float = 1e-8,
reduction: str = "mean",
) -> paddle.Tensor:
input_arr = paddle.to_tensor(input)
target_arr = paddle.to_tensor(target, dtype=input.dtype)
_validate_poisson_nll_params(input_arr, target_arr, eps, reduction)
if log_input:
loss = paddle.exp(input_arr) - target_arr * input_arr
else:
loss = input_arr - target_arr * paddle.log(input_arr + eps)
if full:
point_five = paddle.to_tensor(0.5, dtype=target_arr.dtype)
two_pi = paddle.to_tensor(2 * math.pi, dtype=target_arr.dtype)
        stirling_approx_term = (
(target_arr * paddle.log(target_arr))
- target_arr
+ (point_five * paddle.log(two_pi * target_arr))
)
zeroes = paddle.zeros_like(target_arr, dtype=target_arr.dtype)
ones = paddle.ones_like(target_arr, dtype=target_arr.dtype)
cond = paddle.logical_and(target_arr >= zeroes, target_arr <= ones)
        loss = loss + paddle.where(cond, zeroes, stirling_approx_term)
return _apply_loss_reduction(loss, reduction)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("float16", "float32", "float64"),
}
},
backend_version,
)
def hinge_embedding_loss(
input: paddle.Tensor,
target: paddle.Tensor,
*,
margin: float = 1.0,
reduction: str = "mean",
) -> paddle.Tensor:
return paddle.nn.functional.hinge_embedding_loss(
input,
target,
margin=margin,
reduction=reduction,
)
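if __name__ == "__main__":
    # Illustrative check of the reduction helper (assumes a working paddle
    # install): "sum" and "mean" collapse the loss, "none" passes it through.
    _loss = paddle.to_tensor([1.0, 2.0, 3.0])
    assert float(_apply_loss_reduction(_loss, "sum")) == 6.0
    assert float(_apply_loss_reduction(_loss, "mean")) == 2.0
    assert _apply_loss_reduction(_loss, "none").tolist() == [1.0, 2.0, 3.0]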
| ivy/ivy/functional/backends/paddle/experimental/losses.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/losses.py",
"repo_id": "ivy",
"token_count": 3363
} | 24 |
"""Collection of Paddle random functions, wrapped to fit Ivy syntax and
signature."""
# global
import paddle
import ivy.functional.backends.paddle as paddle_backend
from typing import Optional, Union, Sequence
# local
import ivy
from paddle.device import core
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_randint_check_dtype_and_bound,
_check_valid_scale,
)
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_supported_device_and_dtypes,
with_unsupported_dtypes,
)
from . import backend_version
# Extra #
# ------#
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int8",)}},
backend_version,
)
def random_uniform(
*,
low: Union[float, paddle.Tensor] = 0.0,
high: Union[float, paddle.Tensor] = 1.0,
shape: Optional[Union[paddle.Tensor, ivy.NativeShape, Sequence[int]]] = None,
dtype: paddle.dtype,
device: core.Place = None,
seed=None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if not dtype:
        dtype = ivy.default_float_dtype()
dtype = ivy.as_native_dtype(dtype)
low = paddle.cast(low, "float32") if isinstance(low, paddle.Tensor) else low
high = paddle.cast(high, "float32") if isinstance(high, paddle.Tensor) else high
shape = _check_bounds_and_get_shape(low, high, shape).shape
# Set range and seed
rng = high - low
if seed:
_ = paddle.seed(seed)
random_base = paddle.uniform(shape, min=0.0, max=1.0)
return paddle_backend.add(paddle_backend.multiply(random_base, rng), low).cast(
dtype
)
@with_unsupported_dtypes(
{"2.6.0 and below": ("float16", "int16", "int8")}, backend_version
)
def random_normal(
*,
mean: Union[float, paddle.Tensor] = 0.0,
std: Union[float, paddle.Tensor] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
dtype: paddle.dtype,
seed: Optional[int] = None,
device: core.Place = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
_check_valid_scale(std)
shape = _check_bounds_and_get_shape(mean, std, shape).shape
if seed:
paddle.seed(seed)
return paddle.normal(mean, std, shape).cast(dtype)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
)
}
},
backend_version,
)
def multinomial(
population_size: int,
num_samples: int,
/,
*,
batch_size: int = 1,
probs: Optional[paddle.Tensor] = None,
replace: bool = True,
device: core.Place = None,
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if probs is None:
probs = paddle.ones((batch_size, num_samples)) / population_size
probs = paddle.cast(probs, paddle.float32)
if seed:
paddle.seed(seed)
x = paddle.multinomial(probs, num_samples=num_samples, replacement=replace)
return x
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int8",)}},
backend_version,
)
def randint(
low: Union[int, paddle.Tensor],
high: Union[int, paddle.Tensor],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: core.Place = None,
dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if not dtype:
dtype = ivy.default_int_dtype()
dtype = ivy.as_native_dtype(dtype)
_randint_check_dtype_and_bound(low, high, dtype)
low = paddle.cast(low, "float32") if isinstance(low, paddle.Tensor) else low
high = paddle.cast(high, "float32") if isinstance(high, paddle.Tensor) else high
shape = _check_bounds_and_get_shape(low, high, shape).shape
range = high - low
if seed:
_ = paddle.seed(seed)
_retval = paddle.cast(
paddle.uniform(shape or [1], min=0.0, max=1.0) * range + low, dtype
)
return _retval if shape else _retval.squeeze(axis=0)
def seed(*, seed_value: int = 0):
_ = paddle.seed(seed_value)
return
def shuffle(
x: paddle.Tensor,
axis: Optional[int] = 0,
/,
*,
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if seed:
_ = paddle.seed(seed)
    axis = 0 if axis is None else axis
    # Use Paddle's randperm to permute positions along `axis`; the permutation
    # length must match the size of that dimension, not the number of dims.
    indices = paddle.randperm(x.shape[axis], dtype="int64")
if paddle.is_complex(x):
shuffled_real = paddle.index_select(x.real(), indices, axis=axis)
shuffled_imag = paddle.index_select(x.imag(), indices, axis=axis)
return paddle.complex(shuffled_real, shuffled_imag)
return paddle.index_select(x, indices, axis=axis)
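if __name__ == "__main__":
    # Reproducibility smoke test (illustrative; assumes a working paddle
    # install and an active paddle backend): re-seeding with the same value
    # should reproduce the same draw.
    seed(seed_value=0)
    _a = random_uniform(low=0.0, high=1.0, shape=[3], dtype=paddle.float32)
    seed(seed_value=0)
    _b = random_uniform(low=0.0, high=1.0, shape=[3], dtype=paddle.float32)
    assert bool(paddle.all(_a == _b))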
| ivy/ivy/functional/backends/paddle/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/random.py",
"repo_id": "ivy",
"token_count": 2018
} | 25 |
# global
from typing import Union, Optional, Tuple
import tensorflow as tf
# local
from ivy.func_wrapper import with_unsupported_device_and_dtypes, with_unsupported_dtypes
from .. import backend_version
# Array API Standard #
# -------------------#
@with_unsupported_device_and_dtypes(
{"2.15.0 and below": {"cpu": ("bfloat16",)}},
backend_version,
)
def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if window_length < 2:
return tf.ones([window_length], dtype=dtype)
    if periodic:
        return tf.signal.kaiser_window(window_length + 1, beta, dtype=dtype)[:-1]
    return tf.signal.kaiser_window(window_length, beta, dtype=dtype)
def kaiser_bessel_derived_window(
window_length: int,
beta: float = 12.0,
*,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.signal.kaiser_bessel_derived_window(window_length, beta, dtype)
def vorbis_window(
window_length: Union[tf.Tensor, tf.Variable],
*,
dtype: tf.DType = tf.dtypes.float32,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.signal.vorbis_window(window_length, dtype=dtype, name=None)
def hann_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if size < 2:
return tf.ones([size], dtype=dtype)
if periodic:
return tf.signal.hann_window(size + 1, periodic=False, dtype=dtype)[:-1]
else:
return tf.signal.hann_window(size, periodic=False, dtype=dtype)
def tril_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: Optional[str] = None,
) -> Tuple[Union[tf.Tensor, tf.Variable], ...]:
n_cols = n_rows if n_cols is None else n_cols
if n_rows < 0 or n_cols < 0:
n_rows, n_cols = 0, 0
ret = [[], []]
for i in range(-min(k, 0), n_rows, 1):
for j in range(0, min(n_cols, k + i + 1), 1):
ret[0].append(i)
ret[1].append(j)
return tuple(tf.convert_to_tensor(ret, dtype=tf.int64))
def unsorted_segment_min(
data: tf.Tensor,
segment_ids: tf.Tensor,
num_segments: Union[int, tf.Tensor],
) -> tf.Tensor:
return tf.math.unsorted_segment_min(data, segment_ids, num_segments)
def blackman_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if size < 2:
return tf.cast(
tf.ones([size], dtype=tf.experimental.numpy.result_type(size, 0.0)),
dtype=dtype,
)
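    # Note for the symmetric branch below: linspace(0, size, size) yields
    # k*size/(size-1), and cos(2*pi*(k + k/(size-1))) == cos(2*pi*k/(size-1))
    # for integer k, so this reproduces the usual k/(size-1) grid of the
    # non-periodic Blackman window.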
if periodic:
count = tf.experimental.numpy.arange(size) / size
else:
count = tf.linspace(start=0, stop=size, num=size)
return tf.cast(
(0.42 - 0.5 * tf.cos(2 * tf.experimental.numpy.pi * count))
+ (0.08 * tf.cos(2 * tf.experimental.numpy.pi * 2 * count)),
dtype=dtype,
)
def unsorted_segment_sum(
data: tf.Tensor,
segment_ids: tf.Tensor,
num_segments: Union[int, tf.Tensor],
) -> tf.Tensor:
return tf.math.unsorted_segment_sum(data, segment_ids, num_segments)
@with_unsupported_dtypes({"2.15.0 and below": ("bool",)}, backend_version)
def trilu(
x: Union[tf.Tensor, tf.Variable],
/,
*,
k: int = 0,
upper: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if upper:
return tf.experimental.numpy.triu(x, k)
return tf.experimental.numpy.tril(x, k)
def mel_weight_matrix(
num_mel_bins: int,
dft_length: int,
sample_rate: int,
lower_edge_hertz: float = 125.0,
upper_edge_hertz: float = 3000.0,
):
return tf.signal.linear_to_mel_weight_matrix(
num_mel_bins,
dft_length,
sample_rate,
lower_edge_hertz=lower_edge_hertz,
upper_edge_hertz=upper_edge_hertz,
)
def unsorted_segment_mean(
data: tf.Tensor,
segment_ids: tf.Tensor,
num_segments: Union[int, tf.Tensor],
) -> tf.Tensor:
return tf.math.unsorted_segment_mean(data, segment_ids, num_segments)
@with_unsupported_dtypes(
{"2.13.0 and below": ("bool", "bfloat16", "float16", "complex")}, backend_version
)
def polyval(coeffs: tf.Tensor, x: tf.Tensor):
result = tf.experimental.numpy.polyval(
coeffs,
x,
)
return result
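if __name__ == "__main__":
    # Illustrative check: tril_indices(3, 3, 0) should enumerate the lower
    # triangle row by row, matching np.tril_indices(3).
    _rows, _cols = tril_indices(3, 3, 0)
    assert _rows.numpy().tolist() == [0, 1, 1, 2, 2, 2]
    assert _cols.numpy().tolist() == [0, 0, 1, 0, 1, 2]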
| ivy/ivy/functional/backends/tensorflow/experimental/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/creation.py",
"repo_id": "ivy",
"token_count": 2141
} | 26 |
from typing import Union, Optional, Tuple, Sequence
import tensorflow as tf
from tensorflow.python.ops.numpy_ops import np_math_ops
import ivy
from ivy import (
with_unsupported_dtypes,
with_supported_dtypes,
with_supported_device_and_dtypes,
)
from .. import backend_version
# from ivy.functional.backends.paddle.experimental.statistical import to_positive_axis
from copy import deepcopy
def histogram(
a: tf.Tensor,
/,
*,
bins: Optional[Union[int, tf.Tensor]] = None,
axis: Optional[int] = None,
extend_lower_interval: Optional[bool] = False,
extend_upper_interval: Optional[bool] = False,
dtype: Optional[tf.DType] = None,
range: Optional[Tuple[float]] = None,
weights: Optional[tf.Tensor] = None,
density: Optional[bool] = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tuple[tf.Tensor]:
# TODO: Implement in pure tensorflow
pass
@with_supported_dtypes(
{
"2.15.0 and below": (
"float",
"complex",
)
},
backend_version,
)
def median(
input: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
    # TODO: Implement in pure tensorflow
    pass
@with_supported_dtypes(
{
"2.15.0 and below": (
"float",
"complex",
)
},
backend_version,
)
def nanmean(
a: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
np_math_ops.enable_numpy_methods_on_tensor()
return tf.experimental.numpy.nanmean(a, axis=axis, keepdims=keepdims, dtype=dtype)
def nanmin(
a: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: Optional[bool] = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
axis = tuple(axis) if isinstance(axis, list) else axis
nan_mask = tf.math.is_nan(a)
if where is not None:
nan_mask = tf.math.logical_or(nan_mask, tf.math.logical_not(where))
masked_tensor = tf.where(nan_mask, tf.constant(float("inf"), dtype=a.dtype), a)
if axis is None:
result = tf.math.reduce_min(masked_tensor, keepdims=keepdims)
else:
result = tf.math.reduce_min(masked_tensor, axis=axis, keepdims=keepdims)
if initial is not None:
result = tf.minimum(result, initial)
return result
def _infer_dtype(dtype: tf.DType):
default_dtype = ivy.infer_default_dtype(dtype)
if ivy.dtype_bits(dtype) < ivy.dtype_bits(default_dtype):
return default_dtype
return dtype
def nanprod(
a: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[tf.DType] = None,
keepdims: Optional[bool] = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
np_math_ops.enable_numpy_methods_on_tensor()
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(a.dtype)
if initial is None:
initial = 1
axis = tuple(axis) if isinstance(axis, list) else axis
return (
tf.experimental.numpy.nanprod(a, axis=axis, keepdims=keepdims, dtype=dtype)
* initial
)
def _validate_quantile(q):
if tf.experimental.numpy.ndim(q) == 1 and tf.size(q) < 10:
for i in range(tf.size(q)):
if not (0.0 <= q[i] <= 1.0):
return False
else:
if not (tf.math.reduce_all(q >= 0) and tf.math.reduce_all(q <= 1)):
return False
return True
def to_positive_axis(axis, ndim):
if not isinstance(axis, (list, tuple)):
axis = [axis]
if len(axis) == 0:
raise ValueError("Axis can't be empty!")
if len(set(axis)) != len(axis):
raise ValueError("Duplicated axis!")
for i in range(len(axis)):
if not (isinstance(axis[i], int) and (ndim > axis[i] >= -ndim)):
raise ValueError("Axis must be int in range [-rank(x), rank(x))")
if axis[i] < 0:
axis[i] += ndim
return axis
def _handle_axis(a, q, fn, keepdims=False, axis=None):
nd = tf.experimental.numpy.ndim(a)
axis_arg = deepcopy(axis)
if axis is not None:
axis = to_positive_axis(axis, nd)
if len(axis) == 1:
axis_arg = axis[0]
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
for i, s in enumerate(sorted(keep)):
a = tf.experimental.numpy.moveaxis(a, s, i)
a = tf.reshape(
a,
[
*a.shape[:nkeep],
-1,
],
)
axis_arg = -1
ret = fn(a, q, axis=axis_arg)
if keepdims:
if axis is None:
index_ret = (None,) * nd
else:
index_ret = tuple(None if i in axis else slice(None) for i in range(nd))
ret = ret[(Ellipsis,) + index_ret]
return ret
def _quantile(a, q, axis=None):
ret_dtype = a.dtype
if tf.experimental.numpy.ndim(q) > 1:
raise ValueError("q argument must be a scalar or 1-dimensional!")
if axis is None:
axis = 0
a = tf.reshape(a, [-1])
elif axis != 0:
a = tf.experimental.numpy.moveaxis(a, axis, 0)
axis = 0
n = a.shape[axis]
indices = q * (n - 1)
a = tf.sort(a, axis)
indices_below = tf.cast(tf.math.floor(indices), dtype=tf.int32)
indices_upper = tf.cast(tf.math.ceil(indices), dtype=tf.int32)
weights = indices - tf.cast(indices_below, dtype=ret_dtype)
indices_below = tf.clip_by_value(indices_below, 0, n - 1)
indices_upper = tf.clip_by_value(indices_upper, 0, n - 1)
tensor_upper = tf.gather(a, indices_upper, axis=axis)
tensor_below = tf.gather(a, indices_below, axis=axis)
pred = weights <= 0.5
out = tf.where(pred, tensor_below, tensor_upper)
return tf.cast(out, ret_dtype)
def quantile(
a: Union[tf.Tensor, tf.Variable],
q: Union[tf.Tensor, float],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
interpolation: str = "linear",
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
    # TODO: Implement in pure tensorflow
    pass
def corrcoef(
x: tf.Tensor,
/,
*,
y: tf.Tensor,
rowvar: bool = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> tf.Tensor:
if y is None:
xarr = x
else:
axis = 0 if rowvar else 1
xarr = tf.concat([x, y], axis=axis)
if rowvar:
mean_t = tf.reduce_mean(xarr, axis=1, keepdims=True)
cov_t = ((xarr - mean_t) @ tf.transpose(xarr - mean_t)) / (x.shape[1] - 1)
else:
mean_t = tf.reduce_mean(xarr, axis=0, keepdims=True)
cov_t = (tf.transpose(xarr - mean_t) @ (xarr - mean_t)) / (x.shape[1] - 1)
cov2_t = tf.linalg.diag(1 / tf.sqrt(tf.linalg.diag_part(cov_t)))
cor = cov2_t @ cov_t @ cov2_t
return cor
def nanmedian(
input: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[Tuple[int], int]] = None,
keepdims: bool = False,
overwrite_input: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
    # TODO: Implement in pure tensorflow
    pass
@with_supported_device_and_dtypes(
{
"2.15.0 and below": {
"cpu": (
"int64",
"int32",
"float32",
"float64",
),
"gpu": (
"int64",
"int32",
"float32",
"float64",
),
}
},
backend_version,
)
def bincount(
x: Union[tf.Tensor, tf.Variable],
/,
*,
weights: Optional[Union[tf.Tensor, tf.Variable]] = None,
minlength: int = 0,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.math.bincount(
x,
weights=weights,
minlength=minlength,
dtype=x.dtype if weights is None else weights.dtype,
)
@with_supported_device_and_dtypes(
{
"2.15.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("bfloat16", "float16", "float32", "float64"),
}
},
backend_version,
)
def igamma(
a: tf.Tensor, /, *, x: tf.Tensor, out: Optional[tf.Tensor] = None
) -> tf.Tensor:
return tf.math.igamma(a, x)
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def cov(
x1: tf.Tensor,
x2: tf.Tensor = None,
/,
*,
rowVar: bool = True,
bias: bool = False,
ddof: Optional[int] = None,
fweights: Optional[tf.Tensor] = None,
aweights: Optional[tf.Tensor] = None,
dtype: Optional[type] = None,
) -> tf.Tensor:
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
if len(tf.shape(x1)) > 2:
raise ValueError("x1 has more than 2 dimensions")
if x2 is not None:
if len(tf.shape(x2)) > 2:
raise ValueError("x2 has more than 2 dimensions")
if dtype is None:
if x2 is None:
dtype = tf.experimental.numpy.result_type(x1, tf.float64)
else:
dtype = tf.experimental.numpy.result_type(x1, x2, tf.float64)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
X = tf.experimental.numpy.array(x1, ndmin=2, dtype=dtype)
if not rowVar and tf.shape(X)[0] != 1:
X = tf.transpose(X)
if x2 is not None:
x2 = tf.experimental.numpy.array(x2, copy=False, ndmin=2, dtype=dtype)
if not rowVar and tf.shape(x2)[0] != 1:
x2 = tf.transpose(x2)
X = tf.concat([X, x2], axis=0)
w = None
if fweights is not None:
fweights = tf.cast(fweights, dtype=tf.float64)
if not tf.reduce_all(fweights == tf.round(fweights)):
raise TypeError("fweights must be integer")
if len(tf.shape(fweights)) > 1:
raise RuntimeError("fweights must be 1 dimensional")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and fweights")
if tf.experimental.numpy.any(fweights < 0):
raise ValueError("fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = tf.cast(aweights, dtype=tf.float64)
if len(tf.shape(aweights)) > 1:
raise RuntimeError("aweights must be 1 dimensional")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and aweights")
if tf.experimental.numpy.any(aweights < 0):
raise ValueError("aweights cannot be negative")
if w is None:
w = aweights
else:
w = w * aweights
avg, w_sum = tf.experimental.numpy.average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
if w is None:
fact = tf.shape(X)[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof * sum(w * aweights) / w_sum
if fact <= 0:
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = tf.transpose(X)
else:
X_T = tf.transpose(X * w)
fact = tf.cast(fact, tf.as_dtype(dtype))
c = tf.matmul(X, tf.math.conj(X_T))
return tf.math.truediv(c, fact)
@with_unsupported_dtypes(
{"2.15.0 and below": ("bool",)},
backend_version,
)
def cummax(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
if x.dtype in (tf.complex128, tf.complex64):
x = tf.math.real(x)
if exclusive or reverse:
if exclusive and reverse:
x, indices = __find_cummax(
tf.experimental.numpy.flip(x, axis=axis), axis=axis
)
x, indices = tf.experimental.numpy.swapaxes(
x, axis, -1
), tf.experimental.numpy.swapaxes(indices, axis, -1)
x, indices = tf.experimental.numpy.concatenate(
(tf.experimental.numpy.zeros_like(x[..., -1:]), x[..., :-1]), -1
), tf.experimental.numpy.concatenate(
(
tf.experimental.numpy.zeros_like(indices[..., -1:]),
indices[..., :-1],
),
-1,
)
x, indices = tf.experimental.numpy.swapaxes(
x, axis, -1
), tf.experimental.numpy.swapaxes(indices, axis, -1)
res, indices = tf.experimental.numpy.flip(
x, axis=axis
), tf.experimental.numpy.flip(indices, axis=axis)
elif exclusive:
x = tf.experimental.numpy.swapaxes(x, axis, -1)
x = tf.experimental.numpy.concatenate(
(tf.experimental.numpy.zeros_like(x[..., -1:]), x[..., :-1]), -1
)
x = tf.experimental.numpy.swapaxes(x, axis, -1)
res, indices = __find_cummax(x, axis=axis)
elif reverse:
x = tf.experimental.numpy.flip(x, axis=axis)
x, indices = __find_cummax(x, axis=axis)
res, indices = tf.experimental.numpy.flip(
x, axis=axis
), tf.experimental.numpy.flip(indices, axis=axis)
return res, indices
return __find_cummax(x, axis=axis)
def __find_cummax(x: tf.Tensor, axis: int = 0) -> Tuple[tf.Tensor, tf.Tensor]:
values, indices = [], []
if (
isinstance(x[0], tf.Tensor)
and isinstance(x[0].numpy().tolist(), list)
and len(x[0].numpy().tolist()) >= 1
):
if axis >= 1:
for ret1 in x:
value, indice = __find_cummax(ret1, axis=axis - 1)
indices.append(indice)
values.append(value)
else:
x_list = x.numpy()
z_list = __get_index(x_list.tolist())
indices, values, n1 = x_list.copy(), x_list.copy(), {}
indices.fill(0)
values.fill(0)
z_list = sorted(z_list, key=lambda i: i[1])
for y, y_index in z_list:
multi_index = y_index
if tuple(multi_index[1:]) not in n1:
n1[tuple(multi_index[1:])] = multi_index[0]
indices[y_index] = multi_index[0]
values[y_index] = y
elif (
y
>= x_list[
tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))
]
):
n1[tuple(multi_index[1:])] = multi_index[0]
indices[y_index] = multi_index[0]
values[y_index] = y
else:
indices[y_index] = n1[tuple(multi_index[1:])]
values[y_index] = x_list[
tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))
]
else:
x_indices = tf.convert_to_tensor(list(range(0, x.shape[0])), dtype=x.dtype)
values, indices = tf.scan(
lambda a, b: (
a
if a > b
or tf.experimental.numpy.where(x[0].numpy() == b[0].numpy()) == 0
else b
),
(x, x_indices),
)
return tf.convert_to_tensor(values, dtype=x.dtype), tf.cast(
tf.convert_to_tensor(indices), dtype=tf.int64
)
def __get_index(lst, indices=None, prefix=None):
if indices is None:
indices = []
if prefix is None:
prefix = []
if isinstance(lst, list):
for i, sub_lst in enumerate(lst):
sub_indices = prefix + [i]
__get_index(sub_lst, indices, sub_indices)
else:
indices.append((lst, tuple(prefix)))
return indices
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "complex")},
backend_version,
)
def cummin(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = ivy.as_native_dtype(dtype)
if reverse:
x = tf.reverse(x, axis=[axis])
x_unstacked = tf.unstack(x, axis=axis)
cummin_x_unstacked = []
cummin_x_unstacked.append(x_unstacked[0])
for i, x_sub in enumerate(x_unstacked[1:]):
cummin_x_sub = tf.minimum(cummin_x_unstacked[i], x_sub)
cummin_x_unstacked.append(cummin_x_sub)
cummin_x = tf.stack(cummin_x_unstacked, axis=axis)
if reverse:
cummin_x = tf.reverse(cummin_x, axis=[axis])
if dtype is None:
return cummin_x
else:
return tf.cast(cummin_x, dtype)
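if __name__ == "__main__":
    # Illustrative checks: NaNs are masked out before the min reduction, and
    # cummin is a running minimum.
    _x = tf.constant([1.0, float("nan"), -2.0])
    assert float(nanmin(_x)) == -2.0
    _c = cummin(tf.constant([3.0, 1.0, 2.0]))
    assert _c.numpy().tolist() == [3.0, 1.0, 1.0]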
| ivy/ivy/functional/backends/tensorflow/experimental/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/statistical.py",
"repo_id": "ivy",
"token_count": 8835
} | 27 |
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_check_shapes_broadcastable,
)
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
import tensorflow as tf
from tensorflow.python.framework.dtypes import DType
from typing import Optional, Sequence, Union
from .... import backend_version
import ivy
def beta(
alpha: Union[float, tf.Tensor, tf.Variable],
beta: Union[float, tf.Tensor, tf.Variable],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[Union[ivy.Dtype]] = None,
seed: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not dtype:
dtype = ivy.default_float_dtype()
dtype = ivy.as_native_dtype(dtype)
shape = _check_bounds_and_get_shape(alpha, beta, shape).shape
alpha = tf.cast(alpha, dtype)
beta = tf.cast(beta, dtype)
return tfp.distributions.Beta(alpha, beta).sample(shape, seed=seed)
def gamma(
alpha: Union[float, tf.Tensor, tf.Variable],
beta: Union[float, tf.Tensor, tf.Variable],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: Optional[Union[DType, ivy.Dtype]] = None,
seed: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not dtype:
dtype = ivy.default_float_dtype()
dtype = ivy.as_native_dtype(dtype)
shape = _check_bounds_and_get_shape(alpha, beta, shape).shape
alpha = tf.cast(alpha, dtype)
beta = tf.cast(beta, dtype)
return tfp.distributions.Gamma(alpha, beta).sample(shape, seed=seed)
def bernoulli(
probs: Union[float, tf.Tensor, tf.Variable],
*,
logits: Union[float, tf.Tensor, tf.Variable] = None,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: Optional[str] = None,
dtype: DType,
seed: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = dtype if dtype is not None else probs.dtype
if seed is not None:
tf.random.set_seed(seed)
if logits is not None:
logits = tf.cast(logits, dtype)
if not _check_shapes_broadcastable(shape, logits.shape):
shape = logits.shape
elif probs is not None:
probs = tf.cast(probs, dtype)
if not _check_shapes_broadcastable(shape, probs.shape):
shape = probs.shape
return tfp.distributions.Bernoulli(
logits=logits, probs=probs, dtype=dtype, allow_nan_stats=True
).sample(shape, seed)
# dirichlet
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"blfoat16",
"float16",
)
},
backend_version,
)
def dirichlet(
alpha: Union[tf.Tensor, tf.Variable, float, Sequence[float]],
/,
*,
size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
seed: Optional[int] = None,
dtype: Optional[tf.Tensor] = None,
) -> Union[tf.Tensor, tf.Variable]:
size = size if size is not None else len(alpha)
    if dtype is None:
        dtype = tf.float64
if seed is not None:
tf.random.set_seed(seed)
return tf.cast(
tfd.Dirichlet(
concentration=alpha,
validate_args=False,
allow_nan_stats=True,
force_probs_to_zero_outside_support=False,
name="Dirichlet",
).sample(size),
dtype=dtype,
)
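if __name__ == "__main__":
    # Shape smoke test (illustrative; assumes tensorflow-probability and an
    # active ivy tensorflow backend): scalar parameters plus an explicit shape
    # should sample that shape.
    _s = beta(2.0, 2.0, shape=[4], dtype=tf.float32, seed=0)
    assert tuple(_s.shape) == (4,)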
| ivy/ivy/functional/backends/tensorflow/sub_backends/tf_probability/experimental/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/sub_backends/tf_probability/experimental/random.py",
"repo_id": "ivy",
"token_count": 1608
} | 28 |
# global
from typing import Optional, Union, Tuple, List, Sequence
from numbers import Number
import torch
# local
import ivy
from ivy import promote_types_of_inputs
from ivy.functional.backends.torch.elementwise import _cast_for_unary_op
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_dtypes,
)
from .. import backend_version
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex64",
"complex128",
)
},
backend_version,
)
def amax(
x: torch.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
axis = tuple(axis) if isinstance(axis, list) else axis
return torch.amax(x, dim=axis, keepdim=keepdims)
amax.support_native_out = True
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex64",
"complex128",
)
},
backend_version,
)
def amin(
x: torch.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
axis = tuple(axis) if isinstance(axis, list) else axis
return torch.amin(x, dim=axis, keepdim=keepdims)
amin.support_native_out = True
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, backend_version)
def lgamma(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor:
return torch.lgamma(x, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def fmax(
x1: torch.Tensor,
x2: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x1, x2 = promote_types_of_inputs(x1, x2)
    return torch.fmax(x1, x2, out=out)
fmax.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def sinc(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor:
x = _cast_for_unary_op(x)
return torch.sinc(x, out=out)
sinc.support_native_out = True
def float_power(
x1: Union[torch.Tensor, float, list, tuple],
x2: Union[torch.Tensor, float, list, tuple],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    # Native out is supported, but with restrictions that lead to failures,
    # hence we let ivy handle it.
x1, x2 = promote_types_of_inputs(x1, x2)
return torch.float_power(x1, x2, out=out)
float_power.support_native_out = True
def copysign(
x1: Union[torch.Tensor, Number],
x2: Union[torch.Tensor, Number],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x1, x2 = promote_types_of_inputs(x1, x2)
if not ivy.is_float_dtype(x1):
x1 = x1.type(ivy.default_float_dtype(as_native=True))
x2 = x2.type(ivy.default_float_dtype(as_native=True))
return torch.copysign(torch.as_tensor(x1), x2, out=out)
copysign.support_native_out = True
def count_nonzero(
a: torch.Tensor,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if isinstance(axis, list):
axis = tuple(axis)
if dtype is None:
x = torch.count_nonzero(a, dim=axis)
else:
        x = torch.count_nonzero(a, dim=axis).to(dtype)
if not keepdims:
return x
if isinstance(axis, int):
if axis == -1:
temp = x.dim() - 1
if temp < -1:
temp = 0
return x.unsqueeze(temp)
return x.unsqueeze(axis)
elif axis is not None:
for d in sorted(axis):
x = x.unsqueeze(d)
return x
return x
count_nonzero.support_native_out = False
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, backend_version)
def nansum(
x: torch.Tensor,
/,
*,
axis: Optional[Union[Tuple[int, ...], int]] = None,
dtype: Optional[torch.dtype] = None,
keepdims: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dtype = ivy.as_native_dtype(dtype)
return torch.nansum(x, dim=axis, keepdim=keepdims, dtype=dtype)
nansum.support_native_out = False
def isclose(
a: torch.Tensor,
b: torch.Tensor,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
isclose.support_native_out = False
def diff(
x: Union[torch.Tensor, list, tuple],
/,
*,
n: int = 1,
axis: int = -1,
prepend: Optional[Union[torch.Tensor, int, float, list, tuple]] = None,
append: Optional[Union[torch.Tensor, int, float, list, tuple]] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = x if isinstance(x, torch.Tensor) else torch.tensor(x)
prepend = (
prepend
if isinstance(prepend, torch.Tensor) or prepend is None
else torch.tensor(prepend)
)
append = (
append
if isinstance(append, torch.Tensor) or append is None
else torch.tensor(append)
)
return torch.diff(x, n=n, dim=axis, prepend=prepend, append=append)
def signbit(
x: Union[torch.Tensor, float, int, list, tuple],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.signbit(x, out=out)
signbit.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def hypot(
x1: torch.Tensor,
x2: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.hypot(x1, x2)
def allclose(
x1: torch.Tensor,
x2: torch.Tensor,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[torch.Tensor] = None,
) -> bool:
ret = torch.allclose(x1, x2, rtol=rtol, atol=atol, equal_nan=equal_nan)
return torch.tensor(ret)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def fix(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.fix(x, out=out)
fix.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def nextafter(
x1: torch.Tensor,
x2: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nextafter(x1, x2)
nextafter.support_native_out = True
def zeta(
x: torch.Tensor,
q: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
temp = torch.logical_and(torch.ne(torch.remainder(x, 2), 0), torch.gt(x, 1))
temp = torch.logical_and(temp, torch.le(q, 0))
nan_indices = torch.logical_or(temp, torch.lt(x, 1))
result = torch.special.zeta(x, q)
result.masked_fill_(nan_indices, float("nan"))
return result
zeta.support_native_out = False
def gradient(
x: torch.Tensor,
/,
*,
spacing: Union[int, list, tuple] = 1,
axis: Optional[Union[int, list, tuple]] = None,
edge_order: int = 1,
) -> Union[torch.Tensor, List[torch.Tensor]]:
if axis is None:
axis = tuple(range(len(x.shape)))
if isinstance(axis, int):
axis = (axis,)
if isinstance(spacing, int):
spacing = [spacing] * len(axis)
grad = torch.gradient(x, spacing=spacing, dim=axis, edge_order=edge_order)
if len(grad) == 1:
return grad[0]
return grad
@with_supported_dtypes(
{"2.2 and below": ("float16", "float32", "float64")},
backend_version,
)
def xlogy(
x: torch.tensor, y: torch.tensor, /, *, out: Optional[torch.tensor] = None
) -> torch.tensor:
x, y = promote_types_of_inputs(x, y)
return torch.xlogy(x, y, out=out)
def conj(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
conj_x = torch.conj(x)
return torch.resolve_conj(input=conj_x)
def ldexp(
x1: torch.Tensor,
x2: Union[int, torch.Tensor],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.ldexp(x1, x2, out=out)
def _are_suitable_types_for_torch_lerp(input, end, weight):
suitable_types = [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float16,
torch.bfloat16,
torch.float32,
torch.float64,
]
if not isinstance(input, torch.Tensor) or not isinstance(end, torch.Tensor):
return False
else:
if input.dtype not in suitable_types or end.dtype not in suitable_types:
return False
if not isinstance(weight, float) and not isinstance(weight, torch.Tensor):
return False
else:
if isinstance(weight, torch.Tensor):
if weight.dtype not in [
torch.float16,
torch.bfloat16,
torch.float32,
torch.float64,
]:
return False
return True
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
def lerp(
input: torch.Tensor,
end: torch.Tensor,
weight: Union[torch.Tensor, float],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.lerp(input, end, weight, out=out)
lerp.partial_mixed_handler = lambda input, end, weight, **kwargs: (
_are_suitable_types_for_torch_lerp(input, end, weight)
)
lerp.support_native_out = True
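# Illustrative sketch (linear interpolation, input + weight * (end - input)):
#   >>> lerp(torch.tensor([0.0, 10.0]), torch.tensor([10.0, 20.0]), 0.5)
#   tensor([ 5., 15.])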
def frexp(
x: torch.Tensor,
/,
*,
out: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
mantissa, exponent = torch.frexp(x, out=out)
return mantissa, exponent
def modf(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
fractional_part = torch.frac(x)
integer_part = torch.floor(x)
return fractional_part, integer_part
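# Illustrative sketch -- note that ``frac`` truncates toward zero while
# ``floor`` rounds down, so the two parts only sum back to ``x`` for
# non-negative inputs:
#   >>> modf(torch.tensor([2.5, -2.5]))
#   (tensor([ 0.5000, -0.5000]), tensor([ 2., -3.]))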
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def digamma(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.special.digamma(x, out=out)
digamma.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def erfc(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.special.erfc(x)
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def erfinv(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.special.erfinv(x, out=out)
erfinv.support_native_out = True
| ivy/ivy/functional/backends/torch/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 4910
} | 29 |
"""Collection of PyTorch gradient functions, wrapped to fit Ivy syntax and
signature."""
# global
import torch
from typing import Optional, Callable, Sequence, Union
# local
import ivy
from ivy.func_wrapper import (
outputs_to_ivy_arrays,
inputs_to_native_arrays,
)
from ivy.functional.ivy.gradients import (
_get_required_float_variables,
_get_y_and_ret_idxs,
_get_native_y,
_set_duplicates,
_process_func_ret_and_grads,
)
def variable(x, /):
if ivy.is_int_dtype(x.dtype):
x = ivy.astype(x, ivy.default_float_dtype()).to_native()
if not x.is_leaf:
return x.detach().requires_grad_()
return x.clone().requires_grad_()
def is_variable(x, /, *, exclusive: bool = False):
return isinstance(x, torch.Tensor) and x.requires_grad
def variable_data(x: torch.Tensor, /) -> torch.Tensor:
return x.data
def _grad_func(y, xs, retain_grads):
"""Gradient calculation function."""
# Creating a zero gradient nest for the case where no gradients are computed
grads_ = ivy.nested_map(
lambda x: ivy.to_native(ivy.zeros_like(x)),
xs,
include_derived=True,
shallow=False,
)
# Gradient calculation
if isinstance(xs, ivy.NativeArray):
grads = torch.autograd.grad(
y,
xs,
retain_graph=True,
create_graph=retain_grads,
allow_unused=True,
)[0]
grads = grads_ if grads is None else grads
elif isinstance(xs, ivy.Container):
grads = xs.cont_from_flat_list(
list(
torch.autograd.grad(
[y],
[v for k, v in xs.cont_to_iterator()],
retain_graph=True,
create_graph=retain_grads,
allow_unused=True,
)
)
)
# Returning zeros if no gradients are computed for consistent results
if isinstance(grads, ivy.Container):
grads = ivy.nested_map(
lambda x: 0 if x is None else x, grads, include_derived=True
)
grads += grads_
else:
grads = grads_ if grads is None else grads
else:
def grad_(x):
grad = torch.autograd.grad(
y,
x,
retain_graph=True,
create_graph=retain_grads,
allow_unused=True,
)[0]
return grad if grad is not None else 0
grads = ivy.nested_map(grad_, xs, include_derived=True, shallow=False)
grads = ivy.nested_multi_map(lambda x, _: (x[0] + x[1]), [grads, grads_])
return grads
def execute_with_gradients(
func,
xs: torch.Tensor,
/,
*,
retain_grads: bool = False,
xs_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
ret_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
):
# Conversion of required arrays to float variables and duplicate index chains
xs, xs_grad_idxs, xs1, required_duplicate_index_chains, _ = (
_get_required_float_variables(xs, xs_grad_idxs)
)
func_ret = func(xs)
xs = xs1
# Getting the relevant outputs from the function return for gradient calculation
ret_grad_idxs, y, ret_idxs = _get_y_and_ret_idxs(
func_ret, ret_grad_idxs, create_var=True
)
if isinstance(y, ivy.NativeArray):
# Gradient calculation for a single output
grads = _set_duplicates(
_grad_func(torch.clone(y), xs, retain_grads),
required_duplicate_index_chains,
)
else:
# Gradient calculation for multiple outputs
# ToDo: use functorch.jacrev if it fixes the issue with broken memory reference
y = _get_native_y(y)
grad_arr_idxs = ivy.nested_argwhere(y, lambda x: ivy.is_native_array(x))
grad_arr_values = ivy.multi_index_nest(y, grad_arr_idxs)
grads_ = [
_grad_func(torch.clone(arr_value), xs, retain_grads)
for arr_value in grad_arr_values
]
grads = grads_
if isinstance(ret_idxs, list) and len(ret_idxs):
grads = {
ret_idxs[i]: _set_duplicates(grad, required_duplicate_index_chains)
for i, grad in enumerate(grads_)
}
# Stop further gradient propagation if not retaining gradients
return _process_func_ret_and_grads(func_ret, grads, retain_grads)
def value_and_grad(func):
def grad_fn(xs):
return ivy.to_native(func(xs))
def callback_fn(xs):
y = grad_fn(xs)
def autograd_fn(x):
x = ivy.to_native(x)
grad = torch.autograd.grad(y, x, allow_unused=True)[0]
grad = (
grad
if grad is not None
else ivy.to_native(ivy.zeros_like(ivy.to_ivy(x)))
)
grad = ivy.to_ivy(grad)
return grad
grads = ivy.nested_map(autograd_fn, xs, include_derived=True, shallow=False)
y = ivy.to_ivy(y)
return y, grads
return callback_fn
def stop_gradient(
x: Optional[torch.Tensor],
/,
*,
preserve_type: bool = True,
out: Optional[torch.Tensor] = None,
):
if is_variable(x) and preserve_type:
if x.grad_fn:
x = x.detach()
x.requires_grad = True
elif x.grad:
x.grad.data.zero_()
return x
return x.detach()
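# Illustrative sketch (assuming the torch backend is active):
#   >>> x = variable(torch.tensor([1.0, 2.0]))
#   >>> is_variable(x)
#   True
#   >>> is_variable(stop_gradient(x, preserve_type=False))
#   False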
def jac(func: Callable):
def grad_fn(x_in):
return ivy.to_native(
func(ivy.to_ivy(x_in, nested=True)), nested=True, include_derived=True
)
def callback_fn(x_in):
return ivy.to_ivy(
torch.func.jacfwd(grad_fn)(ivy.to_native(x_in, nested=True)),
nested=True,
include_derived=True,
)
return callback_fn
def grad(f, argnums=0):
if grad.nth == 0:
grad.f_original = f
def _nth_derivative(n):
@outputs_to_ivy_arrays
@inputs_to_native_arrays
def _inner(*args, **kwargs):
max_argnum = argnums if isinstance(argnums, int) else max(argnums)
if max_argnum >= len(args):
raise TypeError(
f"differentiating with respect to {argnums=} requires at least "
f"{max_argnum + 1} positional arguments to be passed by the "
f"caller, but got only {len(args)} positional arguments."
)
if isinstance(argnums, int):
x = args[argnums]
x.requires_grad_()
elif isinstance(argnums, (tuple, list)):
x = []
for i in argnums:
x.append(args[i])
[arr.requires_grad_() for arr in x]
else:
raise TypeError(
"argnums should be passed as int or a list/tuple of ints."
f" Found {type(argnums)}"
)
if n == 0:
ret = (
grad.f_original(*args, **kwargs)
if grad.f_original is not None
else f(*args, **kwargs)
)
grad.nth = 0
return ret
else:
y = _nth_derivative(n - 1)(*args, **kwargs)
# Avoid zero gradients by enabling requires_grad on outputs where it is False
if isinstance(y, tuple):
y_ones = tuple(torch.ones_like(y_) for y_ in y)
[y_.requires_grad_() for y_ in y if y_.requires_grad is False]
elif y.requires_grad is False:
y.requires_grad_()
else:
y_ones = torch.ones_like(y)
dy_dx = torch.autograd.grad(
y, x, create_graph=True, grad_outputs=y_ones, allow_unused=True
)
if dy_dx is None:
return torch.zeros_like(y)
return dy_dx
return _inner
grad.nth += 1
return _nth_derivative(grad.nth)
grad.f_original = None
grad.nth = 0
| ivy/ivy/functional/backends/torch/gradients.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/gradients.py",
"repo_id": "ivy",
"token_count": 4244
} | 30 |
# global
import torch
from typing import Union, Optional, Sequence
def all(
x: torch.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = x.type(torch.bool)
if axis is None:
num_dims = len(x.shape)
axis = list(range(num_dims))
if isinstance(axis, int):
return torch.all(x, dim=axis, keepdim=keepdims, out=out)
dims = len(x.shape)
axis = [i % dims for i in axis]
axis.sort()
for i, a in enumerate(axis):
x = torch.all(x, dim=a if keepdims else a - i, keepdim=keepdims, out=out)
return x
all.support_native_out = True
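# Illustrative sketch (a tuple axis is reduced one dimension at a time):
#   >>> all(torch.tensor([[True, True], [True, False]]), axis=(0, 1))
#   tensor(False)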
def any(
x: torch.Tensor,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = torch.as_tensor(x).type(torch.bool)
if axis is None:
num_dims = len(x.shape)
axis = list(range(num_dims))
if isinstance(axis, int):
return torch.any(x, dim=axis, keepdim=keepdims, out=out)
dims = len(x.shape)
axis = [i % dims for i in axis]
axis.sort()
for i, a in enumerate(axis):
x = torch.any(x, dim=a if keepdims else a - i, keepdim=keepdims, out=out)
return x
any.support_native_out = True
| ivy/ivy/functional/backends/torch/utility.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/utility.py",
"repo_id": "ivy",
"token_count": 614
} | 31 |
import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_unsupported_dtypes
@to_ivy_arrays_and_back
def cholesky(x, /, *, symmetrize_input=True):
def symmetrize(x):
# TODO : Take Hermitian transpose after complex numbers added
return (x + ivy.swapaxes(x, -1, -2)) / 2
if symmetrize_input:
x = symmetrize(x)
return ivy.cholesky(x)
@to_ivy_arrays_and_back
def eig(x, /, *, compute_left_eigenvectors=True, compute_right_eigenvectors=True):
return ivy.eig(x)
@to_ivy_arrays_and_back
def eigh(x, /, *, lower=True, symmetrize_input=True, sort_eigenvalues=True):
UPLO = "L" if lower else "U"
def symmetrize(x):
# TODO : Take Hermitian transpose after complex numbers added
return (x + ivy.swapaxes(x, -1, -2)) / 2
if symmetrize_input:
x = symmetrize(x)
return ivy.eigh(x, UPLO=UPLO)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"0.4.14 and below": ("bfloat16",)}, "jax")
def qr(x, /, *, full_matrices=False):
mode = "reduced"
if full_matrices is True:
mode = "complete"
return ivy.qr(x, mode=mode)
@to_ivy_arrays_and_back
def svd(x, /, *, full_matrices=True, compute_uv=True):
if not compute_uv:
return ivy.svdvals(x)
return ivy.svd(x, full_matrices=full_matrices)
| ivy/ivy/functional/frontends/jax/lax/linalg.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/lax/linalg.py",
"repo_id": "ivy",
"token_count": 630
} | 32 |
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.jax.func_wrapper import (
to_ivy_arrays_and_back,
handle_jax_dtype,
)
from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs
from ivy.functional.backends.jax.experimental.elementwise import _normalize_axis_tuple
@to_ivy_arrays_and_back
def argmin(a, axis=None, out=None, keepdims=None):
return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)
@to_ivy_arrays_and_back
def average(a, axis=None, weights=None, returned=False, keepdims=False):
# canonicalize axis so that axis (or each value in axis) is non-negative
if isinstance(axis, (tuple, list)):
a_ndim = len(ivy.shape(a))
new_axis = [0] * len(axis)
for i, v in enumerate(axis):
if not -a_ndim <= v < a_ndim:
raise ValueError(
f"axis {v} is out of bounds for array of dimension {a_ndim}"
)
new_axis[i] = v + a_ndim if v < 0 else v
axis = tuple(new_axis)
if weights is None:
ret = ivy.mean(a, axis=axis, keepdims=keepdims)
if axis is None:
fill_value = int(a.size) if ivy.is_int_dtype(ret) else float(a.size)
weights_sum = ivy.full((), fill_value, dtype=ret.dtype)
else:
if isinstance(axis, tuple):
# axis is a tuple here, so accumulate the product of the reduced dims manually
fill_value = 1
for d in axis:
fill_value *= a.shape[d]
else:
fill_value = a.shape[axis]
weights_sum = ivy.full_like(ret, fill_value=fill_value)
else:
a = ivy.asarray(a, copy=False)
weights = ivy.asarray(weights, copy=False)
a, weights = promote_types_of_jax_inputs(a, weights)
a_shape = ivy.shape(a)
a_ndim = len(a_shape)
weights_shape = ivy.shape(weights)
# Make sure the dimensions work out
if a_shape != weights_shape:
if len(weights_shape) != 1:
raise ValueError(
"1D weights expected when shapes of a and weights differ."
)
if axis is None:
raise ValueError(
"Axis must be specified when shapes of a and weights differ."
)
elif isinstance(axis, tuple):
raise ValueError(
"Single axis expected when shapes of a and weights differ"
)
elif weights.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis."
)
weights = ivy.broadcast_to(
weights, shape=(a_ndim - 1) * (1,) + weights_shape
)
weights = ivy.moveaxis(weights, -1, axis)
weights_sum = ivy.sum(weights, axis=axis)
ret = ivy.sum(a * weights, axis=axis, keepdims=keepdims) / weights_sum
if returned:
if ret.shape != weights_sum.shape:
weights_sum = ivy.broadcast_to(weights_sum, shape=ret.shape)
return ret, weights_sum
return ret
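# Illustrative sketch (weighted mean, plus the sum of the weights when
# ``returned=True``; values shown are approximate):
#   >>> average(ivy.array([1.0, 2.0, 3.0]),
#   ...         weights=ivy.array([3.0, 2.0, 1.0]), returned=True)
#   (ivy.array(1.6666667), ivy.array(6.))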
@to_ivy_arrays_and_back
def bincount(x, weights=None, minlength=0, *, length=None):
x_list = [int(x[i]) for i in range(x.shape[0])]
max_val = int(ivy.max(ivy.array(x_list)))
ret = [x_list.count(i) for i in range(0, max_val + 1)]
ret = ivy.array(ret)
ret = ivy.astype(ret, ivy.as_ivy_dtype(ivy.int64))
return ret
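# Illustrative sketch (occurrence counts for 0..max(x); note that ``weights``,
# ``minlength`` and ``length`` are currently ignored by this implementation):
#   >>> bincount(ivy.array([0, 1, 1, 3]))
#   ivy.array([1, 2, 0, 1])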
@to_ivy_arrays_and_back
def corrcoef(x, y=None, rowvar=True):
return ivy.corrcoef(x, y=y, rowvar=rowvar)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"0.4.24 and below": ("float16", "bfloat16")}, "jax")
def correlate(a, v, mode="valid", precision=None):
if ivy.get_num_dims(a) != 1 or ivy.get_num_dims(v) != 1:
raise ValueError("correlate() only support 1-dimensional inputs.")
if a.shape[0] == 0 or v.shape[0] == 0:
raise ValueError(
f"correlate: inputs cannot be empty, got shapes {a.shape} and {v.shape}."
)
if v.shape[0] > a.shape[0]:
need_flip = True
a, v = v, a
else:
need_flip = False
out_order = slice(None)
if mode == "valid":
padding = [(0, 0)]
elif mode == "same":
padding = [(v.shape[0] // 2, v.shape[0] - v.shape[0] // 2 - 1)]
elif mode == "full":
padding = [(v.shape[0] - 1, v.shape[0] - 1)]
else:
raise ValueError("mode must be one of ['full', 'same', 'valid']")
result = ivy.conv_general_dilated(
a[None, None, :],
v[:, None, None],
(1,),
padding,
dims=1,
data_format="channel_first",
)
return ivy.flip(result[0, 0, out_order]) if need_flip else result[0, 0, out_order]
@to_ivy_arrays_and_back
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):
return ivy.cov(
m, y, rowVar=rowvar, bias=bias, ddof=ddof, fweights=fweights, aweights=aweights
)
@handle_jax_dtype
@to_ivy_arrays_and_back
def cumprod(a, axis=None, dtype=None, out=None):
if dtype is None:
dtype = ivy.as_ivy_dtype(a.dtype)
return ivy.cumprod(a, axis=axis, dtype=dtype, out=out)
@handle_jax_dtype
@to_ivy_arrays_and_back
def cumsum(a, axis=0, dtype=None, out=None):
if dtype is None:
dtype = ivy.as_ivy_dtype(a.dtype)
return ivy.cumsum(a, axis, dtype=dtype, out=out)
@to_ivy_arrays_and_back
def einsum(
subscripts,
*operands,
out=None,
optimize="optimal",
precision=None,
preferred_element_type=None,
_use_xeinsum=False,
_dot_general=None,
):
return ivy.einsum(subscripts, *operands, out=out)
@to_ivy_arrays_and_back
def max(a, axis=None, out=None, keepdims=False, where=None):
ret = ivy.max(a, axis=axis, out=out, keepdims=keepdims)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_jax_dtype
@to_ivy_arrays_and_back
def mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=None):
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype is None:
dtype = "float32" if ivy.is_int_dtype(a) else a.dtype
ret = ivy.mean(a, axis=axis, keepdims=keepdims, out=out)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ivy.astype(ret, ivy.as_ivy_dtype(dtype), copy=False)
@to_ivy_arrays_and_back
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
return ivy.median(a, axis=axis, out=out, keepdims=keepdims)
@to_ivy_arrays_and_back
def min(a, axis=None, out=None, keepdims=False, where=None):
ret = ivy.min(a, axis=axis, out=out, keepdims=keepdims)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_jax_dtype
@to_ivy_arrays_and_back
def nancumprod(a, axis=None, dtype=None, out=None):
a = ivy.where(ivy.isnan(a), ivy.zeros_like(a), a)
return ivy.cumprod(a, axis=axis, dtype=dtype, out=out)
@handle_jax_dtype
@to_ivy_arrays_and_back
def nancumsum(a, axis=None, dtype=None, out=None):
a = ivy.where(ivy.isnan(a), ivy.zeros_like(a), a)
return ivy.cumsum(a, axis=axis, dtype=dtype, out=out)
@to_ivy_arrays_and_back
def nanmax(
a,
axis=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
nan_mask = ivy.isnan(a)
a = ivy.where(ivy.logical_not(nan_mask), a, a.full_like(-ivy.inf))
where_mask = None
if initial is not None:
if ivy.is_array(where):
a = ivy.where(where, a, a.full_like(initial))
where_mask = ivy.all(ivy.logical_not(where), axis=axis, keepdims=keepdims)
s = ivy.shape(a, as_array=True)
if axis is not None:
if isinstance(axis, (tuple, list)) or ivy.is_array(axis):
# introducing the initial in one dimension is enough
ax = axis[0] % len(s)
else:
ax = axis % len(s)
s[ax] = ivy.array(1)
header = ivy.full(ivy.Shape(s.to_list()), initial, dtype=ivy.dtype(a))
if axis:
if isinstance(axis, (tuple, list)) or ivy.is_array(axis):
a = ivy.concat([a, header], axis=axis[0])
else:
a = ivy.concat([a, header], axis=axis)
else:
a = ivy.concat([a, header], axis=0)
res = ivy.max(a, axis=axis, keepdims=keepdims, out=out)
if nan_mask is not None:
nan_mask = ivy.all(nan_mask, axis=axis, keepdims=keepdims, out=out)
if ivy.any(nan_mask):
res = ivy.where(
ivy.logical_not(nan_mask),
res,
initial if initial is not None else ivy.nan,
out=out,
)
if where_mask is not None and ivy.any(where_mask):
res = ivy.where(ivy.logical_not(where_mask), res, ivy.nan, out=out)
return res.astype(ivy.dtype(a))
@handle_jax_dtype
@to_ivy_arrays_and_back
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=None):
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype is None:
dtype = "float64" if ivy.is_int_dtype(a) else a.dtype
if ivy.is_array(where):
where1 = ivy.array(where, dtype=ivy.bool)
a = ivy.where(where1, a, ivy.full_like(a, ivy.nan))
nan_mask1 = ivy.isnan(a)
not_nan_mask1 = ~ivy.isnan(a)
b1 = ivy.where(ivy.logical_not(nan_mask1), a, ivy.zeros_like(a))
array_sum1 = ivy.sum(b1, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
not_nan_mask_count1 = ivy.sum(
not_nan_mask1, axis=axis, dtype=dtype, keepdims=keepdims, out=out
)
count_zero_handel = ivy.where(
not_nan_mask_count1 != 0,
not_nan_mask_count1,
ivy.full_like(not_nan_mask_count1, ivy.nan),
)
return ivy.divide(array_sum1, count_zero_handel)
@to_ivy_arrays_and_back
def nanmedian(
a,
/,
*,
axis=None,
keepdims=False,
out=None,
overwrite_input=False,
):
return ivy.nanmedian(
a, axis=axis, keepdims=keepdims, out=out, overwrite_input=overwrite_input
).astype(a.dtype)
@to_ivy_arrays_and_back
def nanmin(
a,
axis=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
nan_mask = ivy.isnan(a)
a = ivy.where(ivy.logical_not(nan_mask), a, a.full_like(+ivy.inf))
where_mask = None
if initial is not None:
if ivy.is_array(where):
a = ivy.where(where, a, a.full_like(initial))
where_mask = ivy.all(ivy.logical_not(where), axis=axis, keepdims=keepdims)
s = ivy.shape(a, as_array=True)
if axis is not None:
if isinstance(axis, (tuple, list)) or ivy.is_array(axis):
# introducing the initial in one dimension is enough
ax = axis[0] % len(s)
else:
ax = axis % len(s)
s[ax] = ivy.array(1)
header = ivy.full(ivy.Shape(s.to_list()), initial, dtype=ivy.dtype(a))
if axis:
if isinstance(axis, (tuple, list)) or ivy.is_array(axis):
a = ivy.concat([a, header], axis=axis[0])
else:
a = ivy.concat([a, header], axis=axis)
else:
a = ivy.concat([a, header], axis=0)
res = ivy.min(a, axis=axis, keepdims=keepdims, out=out)
if nan_mask is not None:
nan_mask = ivy.all(nan_mask, axis=axis, keepdims=keepdims, out=out)
if ivy.any(nan_mask):
res = ivy.where(
ivy.logical_not(nan_mask),
res,
initial if initial is not None else ivy.nan,
out=out,
)
if where_mask is not None and ivy.any(where_mask):
res = ivy.where(ivy.logical_not(where_mask), res, ivy.nan, out=out)
return res.astype(ivy.dtype(a))
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"0.4.14 and below": ("complex64", "complex128", "bfloat16", "bool", "float16")},
"jax",
)
def nanpercentile(
a, q, axis=None, out=None, overwrite_input=False, method="linear", keepdims=None
):
def _remove_nan_1d(arr1d, overwrite_input=False):
if arr1d.dtype == object:
c = ivy.not_equal(arr1d, arr1d)
else:
c = ivy.isnan(arr1d)
s = ivy.nonzero(c)[0]
if s.size == arr1d.size:
return arr1d[:0], True
elif s.size == 0:
return arr1d, overwrite_input
else:
if not overwrite_input:
arr1d = arr1d.copy()
enonan = arr1d[-s.size :][~c[-s.size :]]
arr1d[s[: enonan.size]] = enonan
return arr1d[: -s.size], True
def _nanquantile_1d(arr1d, q, overwrite_input=False, method="linear"):
arr1d, overwrite_input = _remove_nan_1d(arr1d, overwrite_input=overwrite_input)
if arr1d.size == 0:
return ivy.full(q.shape, ivy.nan)
return ivy.quantile(arr1d, q, interpolation=method)
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
ndim = ivy.get_num_dims(arr)
if axis is None:
raise ValueError("Axis must be an integer.")
if not -ndim <= axis < ndim:
raise ValueError(
f"axis {axis} is out of bounds for array of dimension {ndim}"
)
if axis < 0:
axis = axis + ndim
def func(elem):
return func1d(elem, *args, **kwargs)
for i in range(1, ndim - axis):
func = ivy.vmap(func, in_axes=i, out_axes=-1)
for i in range(axis):
func = ivy.vmap(func, in_axes=0, out_axes=0)
return ivy.asarray(func(arr))
def _nanquantile_ureduce_func(
a, q, axis=None, out=None, overwrite_input=False, method="linear"
):
if axis is None or a.ndim == 1:
part = a.ravel()
result = _nanquantile_1d(
part, q, overwrite_input=overwrite_input, method=method
)
else:
result = apply_along_axis(
_nanquantile_1d, axis, a, q, overwrite_input, method
)
if q.ndim != 0:
result = ivy.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
def _ureduce(a, func, keepdims=False, **kwargs):
axis = kwargs.get("axis", None)
out = kwargs.get("out", None)
if keepdims is None:
keepdims = False
nd = a.ndim
if axis is not None:
axis = _normalize_axis_tuple(axis, nd)
if keepdims:
if out is not None:
index_out = tuple(
0 if i in axis else slice(None) for i in range(nd)
)
kwargs["out"] = out[(Ellipsis,) + index_out]
if len(axis) == 1:
kwargs["axis"] = axis[0]
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs["axis"] = -1
else:
if keepdims:
if out is not None:
index_out = (0,) * nd
kwargs["out"] = out[(Ellipsis,) + index_out]
r = func(a, **kwargs)
if out is not None:
return out
if keepdims:
if axis is None:
index_r = (ivy.newaxis,) * nd
else:
index_r = tuple(
ivy.newaxis if i in axis else slice(None) for i in range(nd)
)
r = r[(Ellipsis,) + index_r]
return r
def _nanquantile_unchecked(
a,
q,
axis=None,
out=None,
overwrite_input=False,
method="linear",
keepdims=None,
):
"""Assumes that q is in [0, 1], and is an ndarray."""
if a.size == 0:
return ivy.nanmean(a, axis=axis, out=out, keepdims=keepdims)
return _ureduce(
a,
func=_nanquantile_ureduce_func,
q=q,
keepdims=keepdims,
axis=axis,
out=out,
overwrite_input=overwrite_input,
method=method,
)
a = ivy.array(a)
q = ivy.divide(q, 100.0)
q = ivy.array(q)
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if not (0.0 <= q[i] <= 1.0):
ivy.logging.warning("percentile s must be in the range [0, 100]")
return []
else:
if not (ivy.all(q >= 0) and ivy.all(q <= 1)):
ivy.logging.warning("percentile s must be in the range [0, 100]")
return []
return _nanquantile_unchecked(a, q, axis, out, overwrite_input, method, keepdims)
@handle_jax_dtype
@to_ivy_arrays_and_back
def nanstd(
a, /, *, axis=None, dtype=None, out=None, ddof=0, keepdims=False, where=True
):
a = ivy.nan_to_num(a)
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype:
a = ivy.astype(ivy.array(a), ivy.as_ivy_dtype(dtype))
ret = ivy.std(a, axis=axis, correction=ddof, keepdims=keepdims, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_jax_dtype
@to_ivy_arrays_and_back
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
is_nan = ivy.isnan(a)
if dtype is None:
dtype = "float16" if ivy.is_int_dtype(a) else a.dtype
if ivy.any(is_nan):
a = [i for i in a if not ivy.isnan(i)]
if dtype:
a = ivy.astype(ivy.array(a), ivy.as_ivy_dtype(dtype))
ret = ivy.var(a, axis=axis, correction=ddof, keepdims=keepdims, out=out)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
all_nan = ivy.isnan(ret)
if ivy.all(all_nan):
ret = ivy.astype(ret, ivy.array([float("inf")]))
return ret
@to_ivy_arrays_and_back
def ptp(a, axis=None, out=None, keepdims=False):
x = ivy.max(a, axis=axis, keepdims=keepdims)
y = ivy.min(a, axis=axis, keepdims=keepdims)
return ivy.subtract(x, y)
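# Illustrative sketch (peak-to-peak range, i.e. max - min):
#   >>> ptp(ivy.array([1, 9, 3]))
#   ivy.array(8)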
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"0.4.24 and below": ("complex64", "complex128", "bfloat16", "bool", "float16")},
"jax",
)
def quantile(
a,
q,
/,
*,
axis=None,
out=None,
overwrite_input=False,
method="linear",
keepdims=False,
interpolation=None,
):
if method == "nearest":
return ivy.quantile(
a, q, axis=axis, keepdims=keepdims, interpolation="nearest_jax", out=out
)
return ivy.quantile(
a, q, axis=axis, keepdims=keepdims, interpolation=method, out=out
)
@handle_jax_dtype
@with_unsupported_dtypes({"0.4.24 and below": ("bfloat16",)}, "jax")
@to_ivy_arrays_and_back
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=None):
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype is None:
dtype = "float32" if ivy.is_int_dtype(a) else a.dtype
std_a = ivy.std(a, axis=axis, correction=ddof, keepdims=keepdims, out=out)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
std_a = ivy.where(
where, std_a, ivy.default(out, ivy.zeros_like(std_a)), out=out
)
return ivy.astype(std_a, ivy.as_ivy_dtype(dtype), copy=False)
@handle_jax_dtype
@to_ivy_arrays_and_back
def sum(
a,
axis=None,
dtype=None,
out=None,
keepdims=False,
initial=None,
where=None,
promote_integers=True,
):
# TODO: promote_integers is only supported from JAX v0.4.10
if dtype is None and promote_integers:
if ivy.is_bool_dtype(a.dtype):
dtype = ivy.default_int_dtype()
elif ivy.is_uint_dtype(a.dtype):
dtype = "uint64"
a = ivy.astype(a, dtype)
elif ivy.is_int_dtype(a.dtype):
dtype = "int64"
a = ivy.astype(a, dtype)
else:
dtype = a.dtype
elif dtype is None and not promote_integers:
dtype = "float32" if ivy.is_int_dtype(a.dtype) else ivy.as_ivy_dtype(a.dtype)
if initial:
if axis is None:
a = ivy.reshape(a, (1, -1))
axis = 0
s = list(ivy.shape(a))
s[axis] = 1
header = ivy.full(s, initial)
a = ivy.concat([a, header], axis=axis)
ret = ivy.sum(a, axis=axis, keepdims=keepdims, out=out)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ivy.astype(ret, ivy.as_ivy_dtype(dtype))
@handle_jax_dtype
@to_ivy_arrays_and_back
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=None):
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype is None:
dtype = "float32" if ivy.is_int_dtype(a) else a.dtype
ret = ivy.var(a, axis=axis, correction=ddof, keepdims=keepdims, out=out)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ivy.astype(ret, ivy.as_ivy_dtype(dtype), copy=False)
amax = max
amin = min
cumproduct = cumprod
| ivy/ivy/functional/frontends/jax/numpy/statistical.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/statistical.py",
"repo_id": "ivy",
"token_count": 11250
} | 33 |
# local
import ivy
from ivy.functional.frontends.mxnet.func_wrapper import (
to_ivy_arrays_and_back,
handle_mxnet_out,
)
from ivy.functional.frontends.mxnet.numpy import promote_types_of_mxnet_inputs
@handle_mxnet_out
@to_ivy_arrays_and_back
def add(x1, x2, out=None):
x1, x2 = promote_types_of_mxnet_inputs(x1, x2)
return ivy.add(x1, x2, out=out)
@handle_mxnet_out
@to_ivy_arrays_and_back
def sin(x, out=None, **kwargs):
return ivy.sin(x, out=out, **kwargs)
| ivy/ivy/functional/frontends/mxnet/numpy/mathematical_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mxnet/numpy/mathematical_functions.py",
"repo_id": "ivy",
"token_count": 221
} | 34 |
# global
# local
import ivy
import ivy.functional.frontends.numpy as np_frontend
class dtype:
def __init__(self, dtype_in, align=False, copy=False):
self._ivy_dtype = (
to_ivy_dtype(dtype_in)
if not isinstance(dtype_in, dtype)
else dtype_in._ivy_dtype
)
def __repr__(self):
return "ivy.frontends.numpy.dtype('" + self._ivy_dtype + "')"
def __ge__(self, other):
try:
other = dtype(other)
except TypeError as e:
raise ivy.utils.exceptions.IvyException(
"Attempted to compare a dtype with something which"
"couldn't be interpreted as a dtype"
) from e
return self == np_frontend.promote_numpy_dtypes(
self._ivy_dtype, other._ivy_dtype
)
def __gt__(self, other):
try:
other = dtype(other)
except TypeError as e:
raise ivy.utils.exceptions.IvyException(
"Attempted to compare a dtype with something which"
"couldn't be interpreted as a dtype"
) from e
return self >= other and self != other
def __lt__(self, other):
try:
other = dtype(other)
except TypeError as e:
raise ivy.utils.exceptions.IvyException(
"Attempted to compare a dtype with something which"
"couldn't be interpreted as a dtype"
) from e
return self != np_frontend.promote_numpy_dtypes(
self._ivy_dtype, other._ivy_dtype
)
def __le__(self, other):
try:
other = dtype(other)
except TypeError as e:
raise ivy.utils.exceptions.IvyException(
"Attempted to compare a dtype with something which"
"couldn't be interpreted as a dtype"
) from e
return self < other or self == other
@property
def type(self):
return np_frontend.numpy_dtype_to_scalar[self._ivy_dtype]
@property
def alignment(self):
if self._ivy_dtype.is_bool_dtype:
return 1
return self._ivy_dtype.dtype_bits // 8
@property
def base(self):
return self
@property
def char(self):
return np_frontend.numpy_type_to_str_and_num_table[self._ivy_dtype][0]
@property
def byteorder(self):
if self._ivy_dtype[-1] == "8":
return "|"
else:
return "="
@property
def itemsize(self):
return self._ivy_dtype.dtype_bits // 8
@property
def kind(self):
if self._ivy_dtype.is_bool_dtype:
return "b"
elif self._ivy_dtype.is_int_dtype:
return "i"
elif self._ivy_dtype.is_uint_dtype:
return "u"
elif self._ivy_dtype.is_float_dtype:
return "f"
else:
return "V"
@property
def num(self):
return np_frontend.numpy_type_to_str_and_num_table[self._ivy_dtype][1]
@property
def shape(self):
return ()
@property
def str(self):
if self._ivy_dtype.is_bool_dtype:
return "|b1"
elif self._ivy_dtype.is_uint_dtype:
if self._ivy_dtype[4::] == "8":
return "|u1"
return "<u" + str(self.alignment)
elif self._ivy_dtype.is_int_dtype:
if self._ivy_dtype[3::] == "8":
return "|i1"
return "<i" + str(self.alignment)
elif self._ivy_dtype.is_float_dtype:
return "<f" + str(self.alignment)
@property
def subtype(self):
return None
@property
def ivy_dtype(self):
return self._ivy_dtype
@property
def name(self):
return self._ivy_dtype.__repr__()
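# Illustrative sketch of the properties above (assuming the numpy frontend's
# type tables are loaded):
#   >>> d = dtype("float32")
#   >>> d.kind, d.itemsize, d.str
#   ('f', 4, '<f4')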
def to_ivy_dtype(dtype_in):
if dtype_in is None:
return
if isinstance(dtype_in, ivy.Dtype):
return dtype_in
if isinstance(dtype_in, str):
if dtype_in.strip("><=") in np_frontend.numpy_str_to_type_table:
return ivy.Dtype(np_frontend.numpy_str_to_type_table[dtype_in.strip("><=")])
return ivy.Dtype(dtype_in)
if ivy.is_native_dtype(dtype_in):
return ivy.as_ivy_dtype(dtype_in)
if dtype_in in (int, float, bool):
return {int: ivy.int64, float: ivy.float64, bool: ivy.bool}[dtype_in]
if isinstance(dtype_in, np_frontend.dtype):
return dtype_in.ivy_dtype
if isinstance(dtype_in, type):
if issubclass(dtype_in, np_frontend.generic):
return np_frontend.numpy_scalar_to_dtype[dtype_in]
if hasattr(dtype_in, "dtype"):
return dtype_in.dtype.ivy_dtype
else:
return ivy.as_ivy_dtype(dtype_in)
| ivy/ivy/functional/frontends/numpy/data_type_routines/creating_data_types.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/data_type_routines/creating_data_types.py",
"repo_id": "ivy",
"token_count": 2451
} | 35 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def cholesky(a):
return ivy.cholesky(a)
@to_ivy_arrays_and_back
def qr(a, mode="reduced"):
return ivy.qr(a, mode=mode)
@to_ivy_arrays_and_back
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
# TODO: compute_uv and hermitian handling
return ivy.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)
| ivy/ivy/functional/frontends/numpy/linalg/decompositions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/linalg/decompositions.py",
"repo_id": "ivy",
"token_count": 205
} | 36 |
# global
import ivy
# local
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _exp(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.exp(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _exp2(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.pow(2, x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _expm1(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.expm1(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _frexp(
x,
/,
out1_2=(None, None),
out=(None, None),
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
mant, exp = ivy.frexp(x, out=out)
if ivy.is_array(where):
mant = ivy.where(
where, mant, ivy.default(out[0], ivy.zeros_like(mant)), out=out[0]
)
exp = ivy.where(
where, exp, ivy.default(out[1], ivy.zeros_like(exp)), out=out[1]
)
return mant, exp
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _ldexp(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.ldexp(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _log(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.log(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _log10(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.log10(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _log1p(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.log1p(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _log2(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.log2(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _logaddexp(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.logaddexp(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _logaddexp2(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.logaddexp2(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def i0(x):
return ivy.i0(x)
| ivy/ivy/functional/frontends/numpy/mathematical_functions/exponents_and_logarithms.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/mathematical_functions/exponents_and_logarithms.py",
"repo_id": "ivy",
"token_count": 2634
} | 37 |
# local
import ivy.functional.frontends.numpy as ivy_np
class Generator:
def __init__(self, bit_generator=None):
self.seed = bit_generator
def multinomial(self, n, pvals, size=None):
return ivy_np.random.multinomial(n, pvals, size=size)
def default_rng(seed=None):
return Generator(bit_generator=seed)
| ivy/ivy/functional/frontends/numpy/random/Generator/Generator.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/random/Generator/Generator.py",
"repo_id": "ivy",
"token_count": 133
} | 38 |
from .methods import *
| ivy/ivy/functional/frontends/numpy/ufunc/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/ufunc/__init__.py",
"repo_id": "ivy",
"token_count": 7
} | 39 |
# global
import ivy
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_dtypes,
with_supported_device_and_dtypes,
)
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def abs(x, name=None):
return ivy.abs(x)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def broadcast_to(x, shape, name=None):
return ivy.broadcast_to(x, shape)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"float16",
"float32",
"float64",
"int32",
"int64",
"uint8",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def cast(x, dtype):
return ivy.astype(x, dtype)
@with_unsupported_dtypes({"2.6.0 and below": ("int8", "int16")}, "paddle")
@to_ivy_arrays_and_back
def concat(x, axis, name=None):
return ivy.concat(x, axis=axis)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def expand(x, shape, name=None):
return ivy.expand(x, shape)
@with_unsupported_dtypes(
{"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
"paddle",
)
@to_ivy_arrays_and_back
def flip(x, axis, name=None):
return ivy.flip(x, axis=axis)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def gather(params, indices, axis=-1, batch_dims=0, name=None):
return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)
@with_unsupported_dtypes(
{"2.6.0 and below": ("int8", "uint8", "int16", "uint16", "float16", "bfloat16")},
"paddle",
)
@to_ivy_arrays_and_back
def gather_nd(x, index, name=None):
return ivy.gather_nd(x, index)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def index_add(x, index, axis, value, *, name=None):
x = ivy.swapaxes(x, axis, 0)
value = ivy.swapaxes(value, axis, 0)
_to_adds = []
index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))
while index:
_curr_idx = index[0][0]
while len(_to_adds) < _curr_idx:
_to_adds.append(ivy.zeros_like(value[0]))
_to_add_cum = ivy.get_item(value, index[0][1])
while (len(index)) > 1 and (index[0][0] == index[1][0]):
_to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])
index.pop(0)
_to_adds.append(_to_add_cum)
while len(_to_adds) < x.shape[0]:
_to_adds.append(ivy.zeros_like(value[0]))
_to_adds = ivy.stack(_to_adds)
if len(x.shape) < 2:
# Added this line due to the paddle backend treating scalars as 1-d arrays
_to_adds = ivy.flatten(_to_adds)
ret = ivy.add(x, _to_adds)
ret = ivy.swapaxes(ret, axis, 0)
return ret
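# Illustrative sketch (rows of ``value`` are accumulated into ``x`` at the
# given ``index`` positions along ``axis``):
#   >>> x = ivy.zeros((3, 2))
#   >>> index_add(x, ivy.array([0, 2]), 0, ivy.ones((2, 2)))
#   rows 0 and 2 become [1., 1.]; row 1 stays [0., 0.]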
@to_ivy_arrays_and_back
def put_along_axis(arr, indices, values, axis, reduce="assign"):
result = ivy.put_along_axis(arr, indices, values, axis)
return result
@with_supported_dtypes(
{"2.6.0 and below": ("int32", "int64", "float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def repeat_interleave(x, repeats, axis=None, name=None):
return ivy.repeat(x, repeats, axis=axis)
@to_ivy_arrays_and_back
def reshape(x, shape, name=None):
return ivy.reshape(x, shape)
@with_supported_dtypes(
{
"2.5.0 and below": (
"float32",
"float64",
"int32",
"int64",
"complex64",
"complex128",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def roll(x, shifts, axis=None, name=None):
return ivy.roll(x, shifts, axis=axis)
@with_supported_device_and_dtypes(
{
"2.6.0 and above": {
"cpu": (
"bool",
"int32",
"int64",
"float32",
"float64",
),
"gpu": ("float16",),
},
},
"paddle",
)
@to_ivy_arrays_and_back
def rot90(x, k=1, axes=(0, 1), name=None):
return ivy.rot90(x, k=k, axes=axes)
@with_unsupported_dtypes(
{"2.6.0 and below": ("int16", "complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def split(x, num_or_sections, axis=0, name=None):
return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
@with_unsupported_dtypes(
{"2.6.0 and below": ("float16", "bfloat16", "int8", "int16")},
"paddle",
)
@to_ivy_arrays_and_back
def squeeze(x, axis=None, name=None):
return ivy.squeeze(x, axis=axis)
@to_ivy_arrays_and_back
def stack(x, axis=0, name=None):
return ivy.stack(x, axis=axis)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def take_along_axis(arr, indices, axis):
return ivy.take_along_axis(arr, indices, axis)
@with_unsupported_dtypes(
{"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
"paddle",
)
@to_ivy_arrays_and_back
def tile(x, repeat_times, name=None):
return ivy.tile(x, repeats=repeat_times)
@to_ivy_arrays_and_back
def tolist(x):
return ivy.to_list(x)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def unbind(input, axis=0):
shape = list(input.shape)
num_splits = shape[axis]
shape.pop(axis)
return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))
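# Illustrative sketch (splits along ``axis`` and squeezes that axis away):
#   >>> x = ivy.arange(6).reshape((2, 3))
#   >>> unbind(x, axis=0)   # -> a tuple of two tensors, each of shape [3]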
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def unique_consecutive(x, axis=0):
return ivy.unique_consecutive(x, axis=axis)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float32",
"float64",
"int32",
"int64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def unstack(x, axis=0, name=None):
return ivy.unstack(x, axis=axis)
absolute = abs
| ivy/ivy/functional/frontends/paddle/manipulation.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/manipulation.py",
"repo_id": "ivy",
"token_count": 3097
} | 40 |
# global
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def mean(input, axis=None, keepdim=False, out=None):
ret = ivy.mean(input, axis=axis, keepdims=keepdim, out=out)
return ret
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def median(x, axis=None, keepdim=False, name=None):
x = (
ivy.astype(x, ivy.float64)
if ivy.dtype(x) == "float64"
else ivy.astype(x, ivy.float32)
)
return ivy.median(x, axis=axis, keepdims=keepdim)
@with_supported_dtypes(
{"2.5.0 and below": ("float16", "float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def nanmedian(x, axis=None, keepdim=True, name=None):
return ivy.nanmedian(x, axis=axis, keepdims=keepdim)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float16", "float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def numel(x, name=None):
prod = ivy.prod(x.size, dtype=ivy.int64)
try:
length = len(x)
except (ValueError, TypeError):
length = 1 # if 0 dimensional tensor with 1 element
return ivy.array(prod if prod > 0 else ivy.array(length, dtype=ivy.int64))
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "uint16")},
"paddle",
)
@to_ivy_arrays_and_back
def std(x, axis=None, unbiased=True, keepdim=False, name=None):
x = (
ivy.astype(x, ivy.float64)
if ivy.dtype(x) == "float64"
else ivy.astype(x, ivy.float32)
)
return ivy.std(x, axis=axis, correction=int(unbiased), keepdims=keepdim)
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def var(x, axis=None, unbiased=True, keepdim=False, name=None):
if unbiased:
correction = 1
else:
correction = 0
return ivy.var(x, axis=axis, correction=correction, keepdims=keepdim)
| ivy/ivy/functional/frontends/paddle/stat.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/stat.py",
"repo_id": "ivy",
"token_count": 968
} | 41 |
# function wrappers for pandas frontend to handle common operations
from functools import wraps
def outputs_to_self_class(func):
@wraps(func)
def _outputs_to_self_class(*args, **kwargs):
self_arg = args[0]
return self_arg.__class__(
func(*args, **kwargs),
index=self_arg.index,
columns=self_arg.columns,
dtype=self_arg.dtype,
name=self_arg.name,
copy=self_arg.copy,
)
return _outputs_to_self_class
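# Illustrative sketch (hypothetical ``Series``-like class; assumes ``__init__``
# accepts the metadata keywords forwarded above and that the wrapped method
# returns raw data):
#   class Series:
#       @outputs_to_self_class
#       def abs(self):
#           return abs(self.data)
#   # ``s.abs()`` then returns a new ``Series`` carrying over the original
#   # index, columns, dtype, name and copy attributes.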
| ivy/ivy/functional/frontends/pandas/func_wrapper.py/0 | {
"file_path": "ivy/ivy/functional/frontends/pandas/func_wrapper.py",
"repo_id": "ivy",
"token_count": 244
} | 42 |
from . import tree
import ivy
from ivy.functional.frontends.numpy import array
_int8 = ivy.IntDtype("int8")
_int16 = ivy.IntDtype("int16")
_int32 = ivy.IntDtype("int32")
_int64 = ivy.IntDtype("int64")
_uint8 = ivy.UintDtype("uint8")
_uint16 = ivy.UintDtype("uint16")
_uint32 = ivy.UintDtype("uint32")
_uint64 = ivy.UintDtype("uint64")
_bfloat16 = ivy.FloatDtype("bfloat16")
_float16 = ivy.FloatDtype("float16")
_float32 = ivy.FloatDtype("float32")
_float64 = ivy.FloatDtype("float64")
_complex64 = ivy.ComplexDtype("complex64")
_complex128 = ivy.ComplexDtype("complex128")
_bool = ivy.Dtype("bool")
_frontend_array = array
| ivy/ivy/functional/frontends/sklearn/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/__init__.py",
"repo_id": "ivy",
"token_count": 263
} | 43 |
import ivy
# repeated utility function
def type_of_target(y, input_name="y"):
# purely utility function
unique_vals = len(ivy.unique_values(y))
if y.ndim == 2 and y.shape[1] > 1 and unique_vals <= 2:
return "multilabel-indicator"
if y.ndim not in (1, 2):
return "unknown"
if ivy.is_float_dtype(y) and ivy.any(ivy.not_equal(y, y.astype("int64"))):
return "continuous"
else:
if unique_vals > 2:
return "multiclass"
else:
return "binary"
| ivy/ivy/functional/frontends/sklearn/utils/multiclass.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/utils/multiclass.py",
"repo_id": "ivy",
"token_count": 245
} | 44 |
# global
# local
import ivy
from ivy import with_unsupported_dtypes
import ivy.functional.frontends.tensorflow as tf_frontend
from ivy.functional.frontends.tensorflow.func_wrapper import _to_ivy_array
from ivy.functional.frontends.numpy.creation_routines.from_existing_data import array
class EagerTensor:
def __init__(self, array):
self._ivy_array = array if isinstance(array, ivy.Array) else ivy.array(array)
def __repr__(self):
return (
repr(self.ivy_array).replace(
"ivy.array", "ivy.frontends.tensorflow.EagerTensor"
)[:-1]
+ ", shape="
+ str(self.shape)
+ ", dtype="
+ str(self.ivy_array.dtype)
+ ")"
)
# Properties #
# ---------- #
@property
def ivy_array(self):
return self._ivy_array
@property
def device(self):
return self.ivy_array.device
@property
def dtype(self):
return tf_frontend.DType(
tf_frontend.tensorflow_type_to_enum[self.ivy_array.dtype]
)
@property
def shape(self):
return TensorShape(self.ivy_array.shape.shape)
# Instance Methods #
# ---------------- #
def get_shape(self):
return tf_frontend.raw_ops.Shape(input=self)
def set_shape(self, shape):
if shape is None:
return
x_shape = self.shape
if len(x_shape) != len(shape):
raise ValueError(
f"Tensor's shape {x_shape} is not compatible with supplied shape "
f"{shape}."
)
for i, v in enumerate(x_shape):
if v != shape[i] and (shape[i] is not None):
raise ValueError(
f"Tensor's shape {x_shape} is not compatible with supplied shape "
f"{shape}."
)
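# Illustrative sketch (``set_shape`` only validates compatibility; it never
# reshapes):
#   >>> t = EagerTensor(ivy.array([[1, 2], [3, 4]]))
#   >>> t.set_shape([2, None])   # compatible -> returns None
#   >>> t.set_shape([3, 2])      # raises ValueError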
def numpy(self):
return array(self.ivy_array)
def __add__(self, y, name="add"):
return self.__radd__(y)
def __div__(self, y, name="div"):
if "int" in self._ivy_array.dtype:
return tf_frontend.raw_ops.FloorDiv(x=self, y=y, name=name)
ret = tf_frontend.math.divide(self, y, name=name)
return tf_frontend.cast(ret, self.dtype)
def __and__(self, y, name="and"):
return self.__rand__(y)
def __array__(self, dtype=None, name="array"):
if not dtype:
return ivy.to_numpy(self.ivy_array)
return ivy.to_numpy(self.ivy_array).astype(dtype)
def __bool__(self, name="bool"):
temp = ivy.squeeze(self.ivy_array, axis=None)
if temp.shape != ():
raise ValueError(
"The truth value of an array with more than one element is ambiguous. "
"Use a.any() or a.all()"
)
return temp != 0
def __eq__(self, other):
return tf_frontend.raw_ops.Equal(
x=self, y=other, incompatible_shape_error=False
)
def __floordiv__(self, y, name="floordiv"):
return tf_frontend.raw_ops.FloorDiv(x=self, y=y, name=name)
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)
def __ge__(self, y, name="ge"):
return tf_frontend.raw_ops.GreaterEqual(x=self, y=y, name=name)
def __getitem__(self, slice_spec, var=None, name="getitem"):
ivy_args = ivy.nested_map(_to_ivy_array, [self, slice_spec])
ret = ivy.get_item(*ivy_args)
return EagerTensor(ret)
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)
def __gt__(self, y, name="gt"):
return tf_frontend.raw_ops.Greater(x=self, y=y, name=name)
def __invert__(self, name="invert"):
return tf_frontend.raw_ops.Invert(x=self, name=name)
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)
def __le__(self, y, name="le"):
return tf_frontend.raw_ops.LessEqual(x=self, y=y, name=name)
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)
def __lt__(self, y, name="lt"):
return tf_frontend.raw_ops.Less(x=self, y=y, name=name)
def __matmul__(self, y, name="matmul"):
return tf_frontend.linalg.matmul(a=self, b=y, name=name)
def __mul__(self, y, name="mul"):
return tf_frontend.math.multiply(self, y, name=name)
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)
def __mod__(self, y, name="mod"):
return tf_frontend.floormod(self, y, name=name)
def __ne__(self, other):
return tf_frontend.raw_ops.NotEqual(
x=self, y=other, incompatible_shape_error=False
)
def __neg__(self, name="neg"):
return tf_frontend.raw_ops.Neg(x=self, name=name)
__nonzero__ = __bool__
def __or__(self, y, name="or"):
return self.__ror__(y)
def __pow__(self, y, name="pow"):
return tf_frontend.math.pow(x=self, y=y, name=name)
def __radd__(self, x, name="radd"):
return tf_frontend.math.add(self, x, name=name)
def __rand__(self, x, name="rand"):
return tf_frontend.raw_ops.BitwiseAnd(y=self, x=x, name=name)
def __rfloordiv__(self, x, name="rfloordiv"):
return tf_frontend.raw_ops.FloorDiv(x=x, y=self, name=name)
def __rmatmul__(self, x, name="rmatmul"):
return tf_frontend.linalg.matmul(a=x, b=self, name=name)
def __rmul__(self, x, name="rmul"):
return tf_frontend.raw_ops.Mul(x=self, y=x, name=name)
def __ror__(self, x, name="ror"):
return tf_frontend.raw_ops.BitwiseOr(x=self, y=x, name=name)
def __rpow__(self, x, name="rpow"):
return tf_frontend.math.pow(x=x, y=self, name=name)
def __rsub__(self, x, name="rsub"):
return tf_frontend.math.subtract(x, self, name=name)
def __rtruediv__(self, x, name="rtruediv"):
return tf_frontend.math.truediv(x, self, name=name)
def __rxor__(self, x, name="rxor"):
return tf_frontend.raw_ops.BitwiseXor(x=self, y=x, name=name)
def __sub__(self, y, name="sub"):
return tf_frontend.math.subtract(self, y, name=name)
def __truediv__(self, y, name="truediv"):
return tf_frontend.math.truediv(self, y, name=name)
def __len__(self):
return len(self.ivy_array)
def __xor__(self, y, name="xor"):
return self.__rxor__(y)
def __setitem__(self, key, value):
raise ivy.utils.exceptions.IvyException(
"ivy.functional.frontends.tensorflow.EagerTensor object "
"doesn't support assignment"
)
def __iter__(self):
ndim = len(self.shape)
if ndim == 0:
raise TypeError("iteration over a 0-d tensor not supported")
for i in range(self.shape[0]):
yield self[i]
class TensorShape:
# TODO: there are still some methods that may need implementing
def __init__(self, dims):
self._dims = tuple(dims)
def __repr__(self):
if self._dims is not None:
return f"TensorShape({list(self._dims)})"
else:
return "TensorShape(None)"
def __str__(self):
if self.rank is None:
return "<unknown>"
elif self.rank == 1:
return f"({self._dims[0]},)"
else:
return f'({", ".join(str(d) for d in self._dims)})'
# Properties #
# ---------- #
@property
def dims(self):
return self._dims
@property
def ivy_shape(self):
return ivy.Shape(self._dims)
@property
def ndims(self):
return self.__len__()
@property
def rank(self):
return self.__len__()
# Instance Methods #
# ---------------- #
def __add__(self, other):
return self.concatenate(other)
def __bool__(self):
return self._dims is not None
def __concat__(self, other):
return self.concatenate(other)
def __eq__(self, other):
return self._dims == other.dims
def __getitem__(self, key):
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
return self._dims[key]
def __iter__(self):
return iter(d for d in self._dims)
def __len__(self):
return len(self._dims)
def __nonzero__(self):
return self.__bool__()
def __radd__(self, other):
return other.concatenate(self)
def as_list(self):
return list(self._dims)
def concatenate(self, other):
other = as_shape(other)
if self.dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self.dims + other.dims)
def num_elements(self):
return ivy.to_scalar(ivy.prod(self._dims))
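# Illustrative sketch of the TensorShape helpers above:
#   >>> TensorShape([2, 3]).concatenate(TensorShape([4])).as_list()
#   [2, 3, 4]
#   >>> TensorShape([2, 3]).num_elements()
#   6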
# Dummy Tensor class to help with compilation, don't add methods here
class Tensor(EagerTensor):
pass
# Helpers
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(rank=None, **kwargs):
if rank is None and "ndims" in kwargs:
rank = kwargs.pop("ndims")
if kwargs:
raise TypeError(f"Unknown argument: {kwargs}")
if rank is None:
return TensorShape(None)
else:
return TensorShape([None] * rank)
| ivy/ivy/functional/frontends/tensorflow/tensor.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/tensor.py",
"repo_id": "ivy",
"token_count": 4546
} | 45 |
# global
import math
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
# --- Helpers --- #
# --------------- #
def _conv(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
dims = len(input.shape) - 2
if isinstance(padding, str):
padding = padding.upper()
else:
if isinstance(padding, int):
padding = [*[(padding, padding) for _ in range(dims)]]
else:
padding = [*[(p, p) for p in padding]]
ret = ivy.conv_general_dilated(
input,
weight,
stride,
padding,
dims=dims,
data_format="channel_first",
filter_format="channel_first",
dilations=dilation,
feature_group_count=groups,
bias=bias,
)
return ret
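# Usage sketch for `_conv` (hedged; channel-first layouts, shapes illustrative):
# >>> x = ivy.random_normal(shape=(1, 3, 8, 8))  # (N, C_in, H, W)
# >>> w = ivy.random_normal(shape=(6, 3, 3, 3))  # (C_out, C_in // groups, kH, kW)
# >>> _conv(x, w, stride=1, padding=1).shape     # 3x3 kernel with same-padding
# ivy.Shape(1, 6, 8, 8)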
def _conv_transpose(
input,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
):
dims = len(input.shape) - 2
weight = ivy.permute_dims(weight, axes=(*range(2, dims + 2), 0, 1))
for i in range(dims):
weight = ivy.flip(weight, axis=i)
padding, output_padding, stride, dilation = map(
lambda x: [x] * dims if isinstance(x, int) else x,
[padding, output_padding, stride, dilation],
)
pad_widths = [
(
(weight.shape[i] - 1) * dilation[i]
+ max([output_padding[i] - padding[i], 0]),
)
* 2
for i in range(dims)
]
ret = ivy.conv_general_dilated(
input,
weight,
1,
pad_widths,
dims=dims,
data_format="channel_first",
feature_group_count=groups,
x_dilations=stride,
dilations=dilation,
bias=bias,
)
unpad_slice = (slice(None),) * 2
for i in range(dims):
unpad_slice += (
slice(
max([padding[i] - (dilation[i] // 2), padding[i], output_padding[i]]),
ret.shape[2 + i] - padding[i] + output_padding[i] + (dilation[i] // 2),
1,
),
)
ret = ret[unpad_slice]
return ret
def _get_transpose_pad(padding, output_padding, dims):
(
padding,
output_padding,
) = map(
lambda x: [x] * dims if isinstance(x, int) else x, [padding, output_padding]
)
asymmetric_padding = [
[pad, pad - output_pad] for pad, output_pad in zip(padding, output_padding)
]
return asymmetric_padding
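# Worked example (hedged): each dim pads as [padding, padding - output_padding],
# so _get_transpose_pad(1, 0, 2) -> [[1, 1], [1, 1]] and
# _get_transpose_pad(1, 1, 2) -> [[1, 0], [1, 0]].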
# --- Main --- #
# ------------ #
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
return _conv(
input,
weight,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
return _conv(
input,
weight,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
return _conv(
input,
weight,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def conv_transpose1d(
input,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
):
if ivy.current_backend_str() in ["torch"]:
# this backend supports explicit padding, no need for conv_general_dilated
return ivy.conv_general_transpose(
input,
weight,
stride,
_get_transpose_pad(padding, output_padding, 1),
dims=1,
filter_format="channel_first",
data_format="channel_first",
dilations=dilation,
feature_group_count=groups,
bias=bias,
)
else:
return _conv_transpose(
input,
weight,
bias=bias,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
dilation=dilation,
)
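# Output-size sketch (hedged): transposed convolutions follow
# L_out = (L_in - 1) * stride - 2 * padding + dilation * (kernel - 1)
#         + output_padding + 1,
# e.g. L_in=4, stride=2, padding=1, kernel=3, output_padding=0 -> L_out = 7.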
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def conv_transpose2d(
input,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
):
if ivy.current_backend_str() in ["torch", "tensorflow"]:
# these two backends support explicit padding, no need for conv_general_dilated
return ivy.conv_general_transpose(
input,
weight,
stride,
_get_transpose_pad(padding, output_padding, 2),
dims=2,
filter_format="channel_first",
data_format="channel_first",
dilations=dilation,
feature_group_count=groups,
bias=bias,
)
else:
return _conv_transpose(
input,
weight,
bias=bias,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
dilation=dilation,
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def conv_transpose3d(
input,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
):
if ivy.current_backend_str() in ["torch"]:
# this backend supports explicit padding, no need for conv_general_dilated
return ivy.conv_general_transpose(
input,
weight,
stride,
_get_transpose_pad(padding, output_padding, 3),
dims=3,
filter_format="channel_first",
data_format="channel_first",
dilations=dilation,
feature_group_count=groups,
bias=bias,
)
else:
return _conv_transpose(
input,
weight,
bias=bias,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
dilation=dilation,
)
@to_ivy_arrays_and_back
def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
orig_ndim = input.ndim
if orig_ndim == 2:
input = ivy.expand_dims(input, axis=0)
elif orig_ndim != 3:
raise ivy.utils.exceptions.IvyException(
"only 2D or batched 3D inputs are supported"
)
stride = [stride] * 2 if isinstance(stride, int) else stride
dilation = [dilation] * 2 if isinstance(dilation, int) else dilation
padding = [padding] * 2 if isinstance(padding, int) else padding
kernel_size = [kernel_size] * 2 if isinstance(kernel_size, int) else kernel_size
output_size = [output_size] * 2 if isinstance(output_size, int) else output_size
input_shape = [
(output_size[i] + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1)
// stride[i]
+ 1
for i in range(2)
]
n_batches = input.shape[0]
n_channels = input.shape[1] // math.prod(kernel_size)
output = ivy.zeros((n_batches, n_channels, *output_size), dtype=input.dtype)
output_padded = ivy.zero_pad(
output,
((0, 0), (0, 0), (padding[0],) * 2, (padding[1],) * 2),
)
k = 0
for i in range(input_shape[0]):
for j in range(input_shape[1]):
i_in = i * stride[0]
j_in = j * stride[1]
patch = input[:, :, k].reshape((n_batches, n_channels, *kernel_size))
output_padded[
:,
:,
i_in : i_in + kernel_size[0] * dilation[0] : dilation[0],
j_in : j_in + kernel_size[1] * dilation[1] : dilation[1],
] += patch
k += 1
ret = ivy.array(
output_padded[
:,
:,
padding[0] : output_padded.shape[2] - padding[0],
padding[1] : output_padded.shape[3] - padding[1],
]
)
if orig_ndim == 2:
return ivy.squeeze(ret, axis=0)
return ret
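# Shape sketch (hedged, values illustrative): with output_size=(4, 4),
# kernel_size=(2, 2) and stride=2 there are 2 * 2 = 4 patch locations, so
# `fold` expects an (N, C * 4, 4) input:
# >>> inp = ivy.ones((1, 4, 4))                   # N=1, C=1
# >>> fold(inp, (4, 4), (2, 2), stride=2)         # -> shape (1, 1, 4, 4)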
@to_ivy_arrays_and_back
def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
# TODO: refactor this function to use ivy.sliding_window, but ensure that the
# function is transpilable to all backends with varying batch size(see issue #25796)
if input.ndim != 4:
raise ivy.utils.exceptions.IvyException("only batched 4D inputs are supported")
stride = [stride] * 2 if isinstance(stride, int) else stride
dilation = [dilation] * 2 if isinstance(dilation, int) else dilation
padding = [padding] * 2 if isinstance(padding, int) else padding
kernel_size = [kernel_size] * 2 if isinstance(kernel_size, int) else kernel_size
output_shape = [
(input.shape[i + 2] + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1)
// stride[i]
+ 1
for i in range(2)
]
ret = ivy.zeros((*input.shape[0:2], *kernel_size, *output_shape), dtype=input.dtype)
input_padded = ivy.zero_pad(
input,
((0, 0), (0, 0), (padding[0],) * 2, (padding[1],) * 2),
)
for i in range(output_shape[0]):
for j in range(output_shape[1]):
i_in = i * stride[0]
j_in = j * stride[1]
ret[:, :, :, :, i, j] = input_padded[
:,
:,
i_in : i_in + kernel_size[0] * dilation[0] : dilation[0],
j_in : j_in + kernel_size[1] * dilation[1] : dilation[1],
]
return ivy.reshape(
ret, (input.shape[0], input.shape[1] * math.prod(kernel_size), -1)
)
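# Shape sketch (hedged): `unfold` extracts the patches that `fold` sums back,
# e.g. a (1, 1, 4, 4) input with kernel_size=(2, 2), stride=2 yields shape
# (1, 1 * 2 * 2, 4): one flattened 2x2 patch per column, one column per
# patch location.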
| ivy/ivy/functional/frontends/torch/nn/functional/convolution_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/functional/convolution_functions.py",
"repo_id": "ivy",
"token_count": 5094
} | 46 |
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.torch.func_wrapper import (
to_ivy_arrays_and_back,
numpy_to_torch_style_args,
)
from collections import namedtuple
import ivy.functional.frontends.torch as torch_frontend
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def all(input, dim=None, keepdim=False, *, out=None):
input_dtype = ivy.as_ivy_dtype(input.dtype)
ret = ivy.all(input, axis=dim, keepdims=keepdim, out=out)
if ivy.is_uint_dtype(input_dtype):
ret = ivy.astype(ret, input_dtype, out=out)
return ret
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def amax(input, dim=None, keepdim=False, *, out=None):
return ivy.max(input, axis=dim, keepdims=keepdim, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def amin(input, dim=None, keepdim=False, *, out=None):
return ivy.min(input, axis=dim, keepdims=keepdim, out=out)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16", "complex")}, "torch")
def aminmax(input, *, dim=None, keepdim=False, out=None):
minmax_tuple = namedtuple("minmax", ["min", "max"])
return minmax_tuple(
ivy.min(input, axis=dim, keepdims=keepdim, out=out),
ivy.max(input, axis=dim, keepdims=keepdim, out=out),
)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def any(input, dim=None, keepdim=False, *, out=None):
input_dtype = ivy.as_ivy_dtype(input.dtype)
ret = ivy.any(input, axis=dim, keepdims=keepdim, out=out)
if ivy.is_uint_dtype(input_dtype):
ret = ivy.astype(ret, input_dtype, out=out)
return ret
@with_unsupported_dtypes({"2.2 and below": ("complex", "bool")}, "torch")
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def argmax(input, dim=None, keepdim=False):
return ivy.argmax(input, axis=dim, keepdims=keepdim)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def argmin(input, dim=None, keepdim=False):
return ivy.argmin(input, axis=dim, keepdims=keepdim).astype(ivy.int64)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"2.2 and below": ("uint8", "int8")},
"torch",
)
def count_nonzero(input, dim=None):
return ivy.count_nonzero(input, axis=dim).astype(ivy.int64)
@to_ivy_arrays_and_back
def dist(input, other, p=2):
return ivy.vector_norm(ivy.subtract(input, other), ord=p)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def logsumexp(input, dim, keepdim=False, *, out=None):
c = ivy.max(input, axis=dim, keepdims=True)
if ivy.get_num_dims(c) > 0:
c = ivy.where(ivy.isinf(c), ivy.zeros_like(c), c)
elif not ivy.isinf(c):
c = 0
    exponential = ivy.exp(input - c)
    sum_exp = ivy.sum(exponential, axis=dim, keepdims=keepdim)
    ret = ivy.log(sum_exp)
if not keepdim:
c = ivy.squeeze(c, axis=dim)
ret = ivy.add(ret, c, out=out)
return ret
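# Numerical-stability sketch: subtracting the per-axis max `c` before `exp`
# keeps the exponentials bounded by 1, e.g. logsumexp over [1000., 1000.]
# evaluates log(exp(0) + exp(0)) + 1000 = log(2) + 1000 instead of
# overflowing in exp(1000).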
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def max(*input, dim=None, keepdim=False, out=None):
if len(input) == 1:
input = input[0]
elif len(input) == 2:
return torch_frontend.maximum(*input)
if dim is None:
return ivy.max(input, axis=dim, keepdims=keepdim, out=out)
elif out is not None:
ivy.max(input, axis=dim, keepdims=keepdim, out=out[0])
ivy.argmax(input, axis=dim, keepdims=keepdim, out=out[1])
return out
else:
max_tuple = namedtuple("max", ["values", "indices"])
return max_tuple(
ivy.max(input, axis=dim, keepdims=keepdim),
ivy.argmax(input, axis=dim, keepdims=keepdim),
)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def mean(input, dim=None, keepdim=False, *, dtype=None, out=None):
if dtype is not None:
input = input.astype(dtype)
if out is not None:
out = out.astype(dtype)
return ivy.mean(input, axis=dim, keepdims=keepdim, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex", "float16", "bool")}, "torch")
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def median(input, dim=None, keepdim=False, *, out=None):
if dim is None:
input = ivy.reshape(input, (-1,))
sorted_input = ivy.sort(input)
return sorted_input[(sorted_input.shape[0] - 1) // 2]
median_tuple = namedtuple("median", ["values", "indices"])
if input.ndim == 0:
result = median_tuple(input, ivy.array(0))
else:
sorted_indices = ivy.argsort(input, axis=dim)
median_indices = ivy.gather(
sorted_indices, (sorted_indices.shape[dim] - 1) // 2, axis=dim
)
median_values = ivy.take_along_axis(
input, ivy.expand_dims(median_indices, axis=dim), dim
).squeeze(axis=dim)
if keepdim:
median_values = ivy.expand_dims(median_values, axis=dim)
median_indices = ivy.expand_dims(median_indices, axis=dim)
result = median_tuple(median_values, median_indices)
if out is not None:
ivy.inplace_update(out[0], result.values)
ivy.inplace_update(out[1], result.indices)
return out
return result
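# Return sketch (hedged, values illustrative): with dim=1,
# >>> median(ivy.array([[1, 5, 3], [2, 4, 6]]), dim=1)
# returns a `median(values=..., indices=...)` namedtuple holding the lower
# middle element per row: values [3, 4] at original indices [2, 1].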
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"2.2 and below": ("complex64", "complex128")},
"torch",
)
def min(*input, dim=None, keepdim=False, out=None):
if len(input) == 1:
input = input[0]
elif len(input) == 2:
return torch_frontend.minimum(*input)
if dim is None:
return ivy.min(input, axis=dim, keepdims=keepdim, out=out)
elif out is not None:
ivy.min(input, axis=dim, keepdims=keepdim, out=out[0])
ivy.argmin(input, axis=dim, keepdims=keepdim, out=out[1])
return out
else:
min_tuple = namedtuple("min", ["values", "indices"])
return min_tuple(
ivy.min(input, axis=dim, keepdims=keepdim),
ivy.argmin(input, axis=dim, keepdims=keepdim),
)
@to_ivy_arrays_and_back
def moveaxis(input, source, destination):
return ivy.moveaxis(input, source, destination)
@with_supported_dtypes({"2.2 and below": ("float",)}, "torch")
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None):
return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def nanmedian(input, dim=None, keepdim=False, *, out=None):
if dim is None:
flattened_input = ivy.flatten(input)
sorted_input = ivy.sort(flattened_input)
nonnan_index = int(sorted_input.shape[0] - ivy.isnan(sorted_input).sum())
return sorted_input[(nonnan_index - 1) // 2]
nanmedian_tuple = namedtuple("nanmedian", ["values", "indices"])
if input.ndim == 0:
result = nanmedian_tuple(input, ivy.array(0))
else:
        sorted_indices = ivy.argsort(input, axis=dim)
        # NOTE: the NaN count is taken along axis 1, so this branch effectively
        # assumes a 2D input reduced along its last axis
        nonnan_index = (
            sorted_indices.shape[dim] - ivy.isnan(input).sum(axis=1) - 1
        ) // 2
nonnan_index = ivy.expand_dims(nonnan_index, axis=1)
nanmedian_indices = ivy.gather_nd(sorted_indices, nonnan_index, batch_dims=1)
nanmedian_values = ivy.take_along_axis(
input, ivy.expand_dims(nanmedian_indices, axis=dim), dim
).squeeze(axis=dim)
if keepdim:
nanmedian_values = ivy.expand_dims(nanmedian_values, axis=dim)
            nanmedian_indices = ivy.expand_dims(nanmedian_indices, axis=dim)
result = nanmedian_tuple(nanmedian_values, nanmedian_indices)
if out is not None:
ivy.inplace_update(out[0], result.values)
ivy.inplace_update(out[1], result.indices)
return out
return result
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.2 and below": ("float", "int")},
"torch",
)
def nansum(input, dim=None, keepdim=False, *, dtype=None):
input = ivy.where(ivy.isnan(input), ivy.zeros_like(input), input)
return ivy.sum(input, axis=dim, dtype=dtype, keepdims=keepdim, out=None)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.2 and below": ("float", "complex")},
"torch",
)
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):
if dtype is None or not ivy.is_float_dtype(dtype):
dtype = "float64" if "128" in str(dtype) else "float32"
if (
p == "fro" and (dim is None or isinstance(dim, int) or len(dim) <= 2)
) or p is None:
p = 2
if isinstance(p, str):
if dim is None:
            dim = tuple(range(input.ndim))
return ivy.matrix_norm(
input, ord=p, axis=dim, keepdims=keepdim, out=out
).astype(dtype)
else:
return ivy.vector_norm(
input, ord=p, axis=dim, keepdims=keepdim, dtype=dtype, out=out
)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def prod(input, dim=None, keepdim=False, *, dtype=None):
if not dtype:
if "int" in input.dtype:
dtype = ivy.int64
return ivy.prod(input, axis=dim, dtype=dtype, keepdims=keepdim)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def quantile(input, q, dim=None, keepdim=False, *, interpolation="linear", out=None):
return ivy.quantile(
input, q, axis=dim, keepdims=keepdim, interpolation=interpolation, out=out
)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bool", "integer")}, "torch")
def std(input, dim=None, unbiased=True, keepdim=False, *, out=None):
return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def std_mean(input, dim, unbiased, keepdim=False, *, out=None):
    # writing both results through the same `out` buffer would alias them,
    # so the intermediate computations avoid `out`
    temp_std = ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim)
    temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim)
    return temp_std, temp_mean
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
def sum(input, dim=None, keepdim=False, *, dtype=None, out=None):
return ivy.sum(input, axis=dim, dtype=dtype, keepdims=keepdim, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
def unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
if dim is not None:
sorted = True
results = ivy.unique_all(input, axis=dim, by_value=sorted)
ret = (results.values,) if return_counts or return_inverse else results.values
if return_inverse:
inverse_indices = results.inverse_indices
if dim is None:
inverse_indices = inverse_indices.reshape(input.shape)
ret += (inverse_indices,)
if return_counts:
ret += (results.counts,)
return ret
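# Return sketch (hedged): with the default sorted=True the values come back
# ordered, e.g.
# >>> unique(ivy.array([2, 1, 2]), return_counts=True)
# (values [1, 2], counts [1, 2])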
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"complex",
)
},
"torch",
)
@to_ivy_arrays_and_back
def unique_consecutive(input, return_inverse=False, return_counts=False, dim=None):
output, inverse_indices, counts = ivy.unique_consecutive(input, axis=dim)
ret = (output,)
if return_inverse:
ret += (inverse_indices,)
if return_counts:
ret += (counts,)
return ret
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def var(input, dim, unbiased, keepdim=False, *, out=None):
return ivy.var(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)
@numpy_to_torch_style_args
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
"torch",
)
def var_mean(input, dim, unbiased, keepdim=False, *, out=None):
    # writing both results through the same `out` buffer would alias them,
    # so the intermediate computations avoid `out`
    temp_var = ivy.var(input, axis=dim, correction=int(unbiased), keepdims=keepdim)
    temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim)
    return (temp_var, temp_mean)
| ivy/ivy/functional/frontends/torch/reduction_ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/reduction_ops.py",
"repo_id": "ivy",
"token_count": 5780
} | 47 |
from . import regression_loss
from .regression_loss import *
| ivy/ivy/functional/frontends/xgboost/objective/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/xgboost/objective/__init__.py",
"repo_id": "ivy",
"token_count": 16
} | 48 |
# local
import ivy
from ivy.func_wrapper import inputs_to_native_arrays
from ivy.utils.exceptions import handle_exceptions
# helpers
def _verify_coo_components(indices=None, values=None, dense_shape=None):
ivy.utils.assertions.check_all_or_any_fn(
indices,
values,
dense_shape,
fn=ivy.exists,
type="all",
message="indices, values and dense_shape must all be specified",
)
# coordinates style (COO), must be shaped (x, y)
ivy.utils.assertions.check_equal(
len(ivy.shape(indices)), 2, message="indices must be 2D", as_array=False
)
ivy.utils.assertions.check_equal(
len(ivy.shape(values)), 1, message="values must be 1D", as_array=False
)
ivy.utils.assertions.check_equal(
len(ivy.to_ivy_shape(dense_shape)),
ivy.shape(indices)[0],
message="shape and indices shape do not match",
as_array=False,
)
# number of values must match number of coordinates
ivy.utils.assertions.check_equal(
ivy.shape(values)[0],
ivy.shape(indices)[1],
message="values and indices do not match",
as_array=False,
)
for i in range(ivy.shape(indices)[0]):
ivy.utils.assertions.check_less(
indices[i],
ivy.to_ivy_shape(dense_shape)[i],
message="indices is larger than shape",
)
def _verify_common_row_format_components(
crow_indices=None, col_indices=None, values=None, dense_shape=None, format="csr"
):
ivy.utils.assertions.check_all_or_any_fn(
crow_indices,
col_indices,
values,
dense_shape,
fn=ivy.exists,
type="all",
message=(
"crow_indices, col_indices, values and dense_shape must all be specified."
),
)
ivy.utils.assertions.check_equal(
len(ivy.shape(crow_indices)),
1,
message="crow_indices must be 1D.",
as_array=False,
)
ivy.utils.assertions.check_equal(
len(ivy.shape(col_indices)),
1,
message="col_indices must be 1D.",
as_array=False,
)
ivy.utils.assertions.check_equal(
len(dense_shape),
2,
message=f"Only 2D arrays can be converted to {format.upper()} sparse arrays.",
as_array=False,
)
ivy.utils.assertions.check_equal(
ivy.shape(col_indices)[0],
crow_indices[-1],
message="size of col_indices does not match with last element of crow_indices",
)
# number of values must match number of coordinates
ivy.utils.assertions.check_equal(
ivy.shape(col_indices)[0],
ivy.shape(values)[0],
message="values and col_indices do not match",
as_array=False,
)
# index in crow_indices must not exceed length of col_indices
ivy.utils.assertions.check_less(
crow_indices,
ivy.shape(col_indices)[0],
allow_equal=True,
message="index in crow_indices does not match the number of col_indices",
)
def _verify_csr_components(
crow_indices=None, col_indices=None, values=None, dense_shape=None
):
_verify_common_row_format_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
format="csr",
)
ivy.utils.assertions.check_equal(
len(ivy.shape(values)), 1, message="values must be 1D.", as_array=False
)
# number of intervals must be equal to x in shape (x, y)
ivy.utils.assertions.check_equal(
ivy.shape(crow_indices)[0] - 1, dense_shape[0], as_array=False
)
ivy.utils.assertions.check_less(
col_indices,
dense_shape[1],
message="index in col_indices does not match shape",
)
def _verify_bsr_components(
crow_indices=None, col_indices=None, values=None, dense_shape=None
):
_verify_common_row_format_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
format="bsr",
)
ivy.utils.assertions.check_equal(
len(ivy.shape(values)), 3, message="values must be 3D.", as_array=False
)
nrowblocks, ncolblocks = ivy.shape(values)[-2:]
ivy.utils.assertions.check_equal(
dense_shape[0] % nrowblocks,
0,
message="The number of rows of array must be divisible by that of block.",
as_array=False,
)
ivy.utils.assertions.check_equal(
dense_shape[1] % ncolblocks,
0,
message="The number of cols of array must be divisible by that of block.",
as_array=False,
)
ivy.utils.assertions.check_equal(
ivy.shape(crow_indices)[0] - 1, dense_shape[0] // nrowblocks, as_array=False
)
ivy.utils.assertions.check_less(
col_indices,
dense_shape[1] // ncolblocks,
message="index in col_indices does not match shape",
)
def _verify_common_column_format_components(
ccol_indices=None, row_indices=None, values=None, dense_shape=None, format="csc"
):
ivy.utils.assertions.check_all_or_any_fn(
ccol_indices,
row_indices,
values,
dense_shape,
fn=ivy.exists,
type="all",
message=(
"ccol_indices, row_indices, values and dense_shape must all be specified"
),
)
ivy.utils.assertions.check_equal(
len(ivy.shape(ccol_indices)),
1,
message="ccol_indices must be 1D",
as_array=False,
)
ivy.utils.assertions.check_equal(
len(ivy.shape(row_indices)), 1, message="row_indices must be 1D", as_array=False
)
ivy.utils.assertions.check_equal(
len(dense_shape),
2,
message=f"only 2D arrays can be converted to {format.upper()} sparse arrays",
as_array=False,
)
# number of values must match number of coordinates
ivy.utils.assertions.check_equal(
ivy.shape(row_indices)[0],
ivy.shape(values)[0],
message="values and row_indices do not match",
as_array=False,
)
# index in ccol_indices must not exceed length of row_indices
ivy.utils.assertions.check_less(
ccol_indices,
ivy.shape(row_indices)[0],
allow_equal=True,
message="index in ccol_indices does not match the number of row_indices",
)
def _verify_csc_components(
ccol_indices=None, row_indices=None, values=None, dense_shape=None
):
_verify_common_column_format_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
format="csc",
)
ivy.utils.assertions.check_equal(
len(ivy.shape(values)), 1, message="values must be 1D", as_array=False
)
# number of intervals must be equal to y in shape (x, y)
ivy.utils.assertions.check_equal(
ivy.shape(ccol_indices)[0] - 1, dense_shape[1], as_array=False
)
ivy.utils.assertions.check_less(
row_indices,
dense_shape[0],
message="index in row_indices does not match shape",
)
def _verify_bsc_components(
ccol_indices=None, row_indices=None, values=None, dense_shape=None
):
_verify_common_column_format_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
format="bsc",
)
ivy.utils.assertions.check_equal(
len(ivy.shape(values)), 3, message="values must be 3D", as_array=False
)
nrowblocks, ncolblocks = ivy.shape(values)[-2:]
ivy.utils.assertions.check_equal(
dense_shape[0] % nrowblocks,
0,
message="number of rows of array must be divisible by that of block.",
as_array=False,
)
ivy.utils.assertions.check_equal(
dense_shape[1] % ncolblocks,
0,
message="number of cols of array must be divisible by that of block.",
as_array=False,
)
# number of intervals must be equal to y in shape (x, y)
ivy.utils.assertions.check_equal(
ivy.shape(ccol_indices)[0] - 1, dense_shape[1] // ncolblocks, as_array=False
)
ivy.utils.assertions.check_less(
row_indices,
dense_shape[0] // nrowblocks,
message="index in row_indices does not match shape",
)
def _is_data_not_indices_values_and_shape(
data=None,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format=None,
):
if data is not None:
ivy.utils.assertions.check_all_or_any_fn(
coo_indices,
crow_indices,
col_indices,
ccol_indices,
row_indices,
values,
dense_shape,
format,
fn=ivy.exists,
type="any",
limit=[0],
message=(
"Only specify data, coo_indices for COO format, crow_indices and"
" col_indices for CSR and BSR, ccol_indices and row_indicesfor CSC and"
" BSC."
),
)
return True
return False
def _is_valid_format(
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format="coo",
):
    valid_formats = ["coo", "csr", "csc", "bsc", "bsr"]
if not isinstance(format, str) or format.lower() not in valid_formats:
return False
if format.endswith("o"):
# format is coo
return (
ivy.exists(coo_indices)
and ivy.exists(values)
and ivy.exists(dense_shape)
and crow_indices is None
and col_indices is None
and ccol_indices is None
and row_indices is None
)
if format.endswith("r"):
# format is either csr or bsr
return (
ivy.exists(crow_indices)
and ivy.exists(col_indices)
and ivy.exists(values)
and ivy.exists(dense_shape)
and coo_indices is None
and ccol_indices is None
and row_indices is None
)
# format is either csc or bsc
return (
ivy.exists(ccol_indices)
and ivy.exists(row_indices)
and ivy.exists(values)
and ivy.exists(dense_shape)
and coo_indices is None
and crow_indices is None
and col_indices is None
)
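# Format sketch (hedged): for the dense 2x3 matrix [[0, 9, 0], [8, 0, 7]],
#   COO: coo_indices=[[0, 1, 1], [1, 0, 2]], values=[9, 8, 7], dense_shape=(2, 3)
#   CSR: crow_indices=[0, 1, 3], col_indices=[1, 0, 2], values=[9, 8, 7]
#   CSC: ccol_indices=[0, 1, 2, 3], row_indices=[1, 0, 1], values=[8, 9, 7]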
class SparseArray(ivy.Array):
def __init__(
self,
data=None,
*,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format=None,
):
if _is_data_not_indices_values_and_shape(
data,
coo_indices,
crow_indices,
col_indices,
ccol_indices,
row_indices,
values,
dense_shape,
):
self._init_data(data)
elif _is_valid_format(
coo_indices,
crow_indices,
col_indices,
ccol_indices,
row_indices,
values,
dense_shape,
format=format,
):
format = format.lower()
if format == "coo":
self._init_coo_components(coo_indices, values, dense_shape, format)
elif format in ["csr", "bsr"]:
self._init_compressed_row_components(
crow_indices, col_indices, values, dense_shape, format
)
            else:
                self._init_compressed_column_components(
                    ccol_indices, row_indices, values, dense_shape, format
                )
else:
raise ivy.utils.exceptions.IvyException(
"specify all coo components (coo_indices, values and "
" dense_shape), all csr components (crow_indices, "
"col_indices, values and dense_shape), all csc components "
"(ccol_indices, row_indices, values and dense_shape). all "
"bsc components (ccol_indices, row_indices, values and "
"dense_shape), or all bsr components (crow_indices, "
"col_indices, values and dense_shape)."
)
# initialize parent class
super().__init__(self)
def _init_data(self, data):
if ivy.is_ivy_sparse_array(data):
self._data = data.data
self._coo_indices = data.coo_indices
self._crow_indices = data.crow_indices
self._col_indices = data.col_indices
self._ccol_indices = data.ccol_indices
self._row_indices = data.row_indices
self._values = data.values
self._dense_shape = data.dense_shape
self._format = data.format.lower()
else:
ivy.utils.assertions.check_true(
ivy.is_native_sparse_array(data), message="not a native sparse array"
)
self._data = data
self._native_sparse_array_to_indices_values_and_shape()
def _native_sparse_array_to_indices_values_and_shape(self):
indices, values, shape = ivy.native_sparse_array_to_indices_values_and_shape(
self._data
)
if "coo_indices" in indices:
self._coo_indices = ivy.array(indices["coo_indices"], dtype="int64")
self._crow_indices = None
self._col_indices = None
self._ccol_indices = None
self._row_indices = None
elif "crow_indices" in indices and "col_indices" in indices:
self._crow_indices = ivy.array(indices["crow_indices"], dtype="int64")
self._col_indices = ivy.array(indices["col_indices"], dtype="int64")
self._coo_indices = None
self._ccol_indices = None
self._row_indices = None
else:
self._ccol_indices = ivy.array(indices["ccol_indices"], dtype="int64")
self._row_indices = ivy.array(indices["row_indices"], dtype="int64")
self._coo_indices = None
self._crow_indices = None
self._col_indices = None
self._values = ivy.array(values)
self._dense_shape = ivy.Shape(shape)
self._format = self._data.format.lower()
def _init_coo_components(self, coo_indices, values, shape, format):
coo_indices = ivy.array(coo_indices, dtype="int64")
values = ivy.array(values)
shape = ivy.Shape(shape)
self._data = ivy.native_sparse_array(
coo_indices=coo_indices, values=values, dense_shape=shape, format=format
)
self._coo_indices = coo_indices
self._values = values
self._dense_shape = shape
self._format = format
self._crow_indices = None
self._col_indices = None
self._ccol_indices = None
self._row_indices = None
def _init_compressed_row_components(
self, crow_indices, col_indices, values, shape, format
):
crow_indices = ivy.array(crow_indices, dtype="int64")
col_indices = ivy.array(col_indices, dtype="int64")
values = ivy.array(values)
shape = ivy.Shape(shape)
self._data = ivy.native_sparse_array(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=shape,
format=format,
)
self._crow_indices = crow_indices
self._col_indices = col_indices
self._values = values
self._dense_shape = shape
self._format = format
self._coo_indices = None
self._ccol_indices = None
self._row_indices = None
def _init_compressed_column_components(
self, ccol_indices, row_indices, values, shape, format
):
ccol_indices = ivy.array(ccol_indices, dtype="int64")
row_indices = ivy.array(row_indices, dtype="int64")
values = ivy.array(values)
shape = ivy.Shape(shape)
self._data = ivy.native_sparse_array(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=shape,
format=format,
)
self._ccol_indices = ccol_indices
self._row_indices = row_indices
self._values = values
self._dense_shape = shape
self._format = format
self._coo_indices = None
self._crow_indices = None
self._col_indices = None
def __repr__(self):
if self._dev_str is None:
self._dev_str = ivy.as_ivy_dev(self.device)
self._pre_repr = "ivy.sparse_array"
if "gpu" in self._dev_str:
self._post_repr = f", dev={self._dev_str})"
else:
self._post_repr = ")"
if self._format == "coo":
repr = (
f"indices={self._coo_indices}, values={self._values},"
f" dense_shape={self._dense_shape}"
)
elif self._format in ["csr", "bsr"]:
repr = (
f"crow_indices={self._crow_indices}, col_indices={self._col_indices},"
f" values={self._values}, dense_shape={self._dense_shape}"
)
else:
repr = (
f"ccol_indices={self._ccol_indices}, row_indices={self._row_indices},"
f" values={self._values}, dense_shape={self._dense_shape}"
)
return (
self._pre_repr
+ "("
+ repr
+ f", format={self._format}"
+ self._post_repr.format(ivy.current_backend_str())
)
# Properties #
# -----------#
@property
def data(self):
return self._data
@property
def coo_indices(self):
return self._coo_indices
@property
def crow_indices(self):
return self._crow_indices
@property
def col_indices(self):
return self._col_indices
@property
def ccol_indices(self):
return self._ccol_indices
@property
def row_indices(self):
return self._row_indices
@property
def values(self):
return self._values
@property
def dense_shape(self):
return self._dense_shape
@property
def format(self):
return self._format
# Setters #
# --------#
@data.setter
def data(self, data):
self._init_data(data)
@coo_indices.setter
def coo_indices(self, indices):
indices = ivy.array(indices, dtype="int64")
_verify_coo_components(
indices=indices, values=self._values, dense_shape=self._dense_shape
)
self._coo_indices = indices
@crow_indices.setter
def crow_indices(self, indices):
indices = ivy.array(indices, dtype="int64")
if self._format == "csr":
_verify_csr_components(
crow_indices=indices,
col_indices=self._col_indices,
values=self._values,
dense_shape=self._dense_shape,
)
else:
_verify_bsr_components(
crow_indices=indices,
col_indices=self._col_indices,
values=self._values,
dense_shape=self._dense_shape,
)
self._crow_indices = indices
@col_indices.setter
def col_indices(self, indices):
indices = ivy.array(indices, dtype="int64")
if self._format == "csr":
_verify_csr_components(
crow_indices=indices,
col_indices=self._col_indices,
values=self._values,
dense_shape=self._dense_shape,
)
else:
_verify_bsr_components(
crow_indices=indices,
col_indices=self._col_indices,
values=self._values,
dense_shape=self._dense_shape,
)
self._col_indices = indices
@ccol_indices.setter
def ccol_indices(self, indices):
indices = ivy.array(indices, dtype="int64")
if self._format == "csc":
_verify_csc_components(
ccol_indices=indices,
row_indices=self._row_indices,
values=self._values,
dense_shape=self._dense_shape,
)
else:
_verify_bsc_components(
ccol_indices=indices,
row_indices=self._row_indices,
values=self._values,
dense_shape=self._dense_shape,
)
self._ccol_indices = indices
@row_indices.setter
def row_indices(self, indices):
indices = ivy.array(indices, dtype="int64")
if self._format == "csc":
_verify_csc_components(
ccol_indices=self._ccol_indices,
row_indices=indices,
values=self._values,
dense_shape=self._dense_shape,
)
else:
_verify_bsc_components(
ccol_indices=self._ccol_indices,
row_indices=indices,
values=self._values,
dense_shape=self._dense_shape,
)
self._row_indices = indices
    @values.setter
    def values(self, values):
        # NOTE: the verification below assumes the COO layout; compressed
        # formats would need format-aware checks here
        values = ivy.array(values)
_verify_coo_components(
indices=self._coo_indices, values=values, dense_shape=self._dense_shape
)
self._values = values
    @dense_shape.setter
    def dense_shape(self, dense_shape):
        # NOTE: the verification below assumes the COO layout; compressed
        # formats would need format-aware checks here
        dense_shape = ivy.Shape(dense_shape)
_verify_coo_components(
indices=self._coo_indices, values=self._values, dense_shape=dense_shape
)
self._dense_shape = dense_shape
@format.setter
def format(self, format):
self._format = format
# Instance Methods #
# ---------------- #
def _coo_to_dense_coordinates(self):
all_coordinates = []
for i in range(self._values.shape[0]):
coordinate = ivy.gather(self._coo_indices, ivy.array([[i]]))
coordinate = ivy.reshape(coordinate, (self._coo_indices.shape[0],))
all_coordinates.append(coordinate.to_list())
return all_coordinates
    def _csr_to_dense_coordinates(self):
        all_coordinates = []
        total_rows = self._dense_shape[0]
        all_cols = self._col_indices.to_list()
        crow = self._crow_indices.to_list()
        for row in range(total_rows):
            cols = all_cols[crow[row] : crow[row + 1]]
            for col in cols:
                all_coordinates.append([row, col])
return all_coordinates
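    # Worked example (hedged): with crow_indices=[0, 1, 3] and
    # col_indices=[1, 0, 2] (the 2x3 CSR sketch above), row 0 contributes
    # [0, 1] and row 1 contributes [1, 0] and [1, 2], reproducing the COO
    # coordinate list.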
    def _csc_to_dense_coordinates(self):
        # CSC sparse array
        all_coordinates = []
        total_cols = self._dense_shape[1]
        all_rows = self._row_indices.to_list()
        ccol = self._ccol_indices.to_list()
        for col in range(total_cols):
            rows = all_rows[ccol[col] : ccol[col + 1]]
            for row in rows:
                all_coordinates.append([row, col])
        return all_coordinates
def _bsr_to_dense_coordinates(self):
all_coordinates = []
total_rows = self._dense_shape[0]
all_rows = self._crow_indices.to_list()
all_cols = self._col_indices.to_list()
nblockrows, nblockcols = self._values.shape[-2:]
for row in range(total_rows // nblockrows):
cols = all_cols[all_rows[row] : all_rows[row + 1]]
for col in cols:
for col_index in range(nblockcols):
for row_index in range(nblockrows):
all_coordinates.append(
[
nblockrows * row + row_index,
nblockcols * col + col_index,
]
)
return all_coordinates
def _bsc_to_dense_coordinates(self):
all_coordinates = []
total_cols = self._dense_shape[1]
all_rows = self._row_indices.to_list()
all_cols = self._ccol_indices.to_list()
nblockrows, nblockcols = self._values.shape[-2:]
for col in range(total_cols // nblockcols):
rows = all_rows[all_cols[col] : all_cols[col + 1]]
for row in rows:
for col_index in range(nblockcols):
for row_index in range(nblockrows):
all_coordinates.append(
[
nblockrows * row + row_index,
nblockcols * col + col_index,
]
)
return all_coordinates
def to_dense_array(self, *, native=False):
if self._format == "coo":
all_coordinates = self._coo_to_dense_coordinates()
elif self._format == "csr":
all_coordinates = self._csr_to_dense_coordinates()
elif self._format == "csc":
all_coordinates = self._csc_to_dense_coordinates()
elif self._format == "bsc":
all_coordinates = self._bsc_to_dense_coordinates()
else:
all_coordinates = self._bsr_to_dense_coordinates()
# make dense array
ret = ivy.scatter_nd(
ivy.array(all_coordinates),
ivy.flatten(self._values),
ivy.array(self._dense_shape),
)
return ret.to_native() if native else ret
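# Usage sketch (hedged; requires a backend with native sparse support):
# >>> sp = ivy.SparseArray(
# ...     crow_indices=[0, 1, 3], col_indices=[1, 0, 2], values=[9., 8., 7.],
# ...     dense_shape=(2, 3), format="csr",
# ... )
# >>> sp.to_dense_array()
# ivy.array([[0., 9., 0.], [8., 0., 7.]])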
class NativeSparseArray:
pass
def is_ivy_sparse_array(x):
return isinstance(x, ivy.SparseArray)
@handle_exceptions
@inputs_to_native_arrays
def is_native_sparse_array(x):
return ivy.current_backend().is_native_sparse_array(x)
@handle_exceptions
@inputs_to_native_arrays
def native_sparse_array(
data=None,
*,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format=None,
):
return ivy.current_backend().native_sparse_array(
data,
coo_indices=coo_indices,
crow_indices=crow_indices,
col_indices=col_indices,
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
format=format,
)
@handle_exceptions
def native_sparse_array_to_indices_values_and_shape(x):
return ivy.current_backend().native_sparse_array_to_indices_values_and_shape(x)
| ivy/ivy/functional/ivy/experimental/sparse_array.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/sparse_array.py",
"repo_id": "ivy",
"token_count": 13662
} | 49 |
# global
from typing import Union, Optional, Sequence
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# Helpers #
# --------#
def _get_promoted_type_of_operands(operands):
dtype = None
for operand in operands:
operand_dtype = ivy.as_ivy_dtype(operand.dtype)
if dtype is None:
dtype = operand_dtype
else:
dtype = ivy.promote_types(dtype, operand_dtype)
return ivy.as_native_dtype(dtype)
# Array API Standard #
# -------------------#
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def min(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the minimum value of the input array ``x``.
.. note::
When the number of elements over which to compute the minimum value is zero, the
minimum value is implementation-defined. Specification-compliant libraries may
choose to raise an error, return a sentinel value (e.g., if ``x`` is a
floating-point input array, return ``NaN``), or return the maximum possible value
for the input array ``x`` data type (e.g., if ``x`` is a floating-point array,
return ``+infinity``).
**Special Cases**
For floating-point operands,
- If ``x_i`` is ``NaN``, the minimum value is ``NaN``
(i.e., ``NaN`` values propagate).
Parameters
----------
x
Input array. Should have a real-valued data type.
axis
axis or axes along which minimum values must be computed. By default, the
minimum value must be computed over the entire array. If a tuple of integers,
minimum values must be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes (dimensions) must be included
in the result as singleton dimensions, and, accordingly, the result must be
compatible with the input array (see :ref:`broadcasting`). Otherwise,
if ``False``, the reduced axes (dimensions) must not be included in the result.
Default: ``False``.
initial
The maximum value of an output element.
Must be present to allow computation on empty slice.
where
Elements to compare for minimum
out
optional output array, for writing the result to.
Returns
-------
ret
if the minimum value was computed over the entire array, a zero-dimensional
array containing the minimum value; otherwise, a non-zero-dimensional array
containing the minimum values. The returned array must have the same data type
as ``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.min.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> z = ivy.min(x)
>>> print(z)
ivy.array(1)
>>> x = ivy.array([0, 1, 2])
>>> z = ivy.array([0, 0, 0])
>>> y = ivy.min(x, out=z)
>>> print(z)
ivy.array(0)
>>> x = ivy.array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.min(x, axis=0, keepdims=True)
>>> print(y)
ivy.array([[0, 1, 2]])
>>> x = ivy.native_array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.min(x)
>>> print(y)
ivy.array(0)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([2, 3, 4]))
>>> z = ivy.min(x)
>>> print(z)
{
a: ivy.array(1),
b: ivy.array(2)
}
"""
return current_backend(x).min(
x, axis=axis, keepdims=keepdims, initial=initial, where=where, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def max(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the maximum value of the input array ``x``.
.. note::
When the number of elements over which to compute the maximum value is zero, the
maximum value is implementation-defined. Specification-compliant libraries may
choose to raise an error, return a sentinel value (e.g., if ``x`` is a
floating-point input array, return ``NaN``), or return the minimum possible
value for the input array ``x`` data type (e.g., if ``x`` is a floating-point
array, return ``-infinity``).
**Special Cases**
For floating-point operands,
- If ``x_i`` is ``NaN``, the maximum value is ``NaN`` (i.e., ``NaN`` values
propagate).
Parameters
----------
x
input array. Should have a numeric data type.
axis
axis or axes along which maximum values must be computed. By default, the
maximum value must be computed over the entire array. If a tuple of integers,
maximum values must be computed over multiple axes. Default: ``None``.
keepdims
if ``True``, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with the
input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced axes
(dimensions) must not be included in the result. Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the maximum value was computed over the entire array, a zero-dimensional
array containing the maximum value; otherwise, a non-zero-dimensional array
containing the maximum values. The returned array must have the same data type
as ``x``.
This method conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.max.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> z = ivy.max(x)
>>> print(z)
ivy.array(3)
>>> x = ivy.array([0, 1, 2])
>>> z = ivy.array(0)
>>> y = ivy.max(x, out=z)
>>> print(z)
ivy.array(2)
>>> x = ivy.array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.max(x, axis=0, keepdims=True)
>>> print(y)
ivy.array([[4, 6, 10]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.max(x)
>>> print(y)
{
a: ivy.array(2.),
b: ivy.array(5.)
}
>>> x = ivy.Container(a=ivy.array([[1, 2, 3],[-1,0,2]]),
... b=ivy.array([[2, 3, 4], [0, 1, 2]]))
>>> z = ivy.max(x, axis=1)
>>> print(z)
{
a: ivy.array([3, 2]),
b: ivy.array([4, 2])
}
"""
return current_backend(x).max(x, axis=axis, keepdims=keepdims, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def mean(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the arithmetic mean of the input array ``x``.
**Special Cases**
Let ``N`` equal the number of elements over which to compute the arithmetic mean.
- If ``N`` is ``0``, the arithmetic mean is ``NaN``.
- If ``x_i`` is ``NaN``, the arithmetic mean is ``NaN`` (i.e., ``NaN`` values
propagate).
Parameters
----------
x
input array. Should have a floating-point data type.
axis
axis or axes along which arithmetic means must be computed. By default, the mean
must be computed over the entire array. If a Sequence of integers, arithmetic
means must be computed over multiple axes. Default: ``None``.
keepdims
bool, if ``True``, the reduced axes (dimensions) must be included in the result
as singleton dimensions, and, accordingly, the result must be compatible with
the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced
axes (dimensions) must not be included in the result. Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
array, if the arithmetic mean was computed over the entire array, a
zero-dimensional array containing the arithmetic mean; otherwise, a
non-zero-dimensional array containing the arithmetic means. The returned
array must have the same data type as ``x``.
.. note::
While this specification recommends that this function only accept input
arrays having a floating-point data type, specification-compliant array
libraries may choose to accept input arrays having an integer data type.
While mixed data type promotion is implementation-defined, if the input
array ``x`` has an integer data type, the returned array must have the
default floating-point data type.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/API_specification/generated/
signatures.statistical_functions.mean.html>`_ in the standard.
    Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also accepts
:class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([3., 4., 5.])
>>> y = ivy.mean(x)
>>> print(y)
ivy.array(4.)
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.array(0.)
>>> ivy.mean(x, out=y)
>>> print(y)
ivy.array(1.)
>>> x = ivy.array([[-1., -2., -3., 0., -1.], [1., 2., 3., 0., 1.]])
>>> y = ivy.array([0., 0.])
>>> ivy.mean(x, axis=1, out=y)
>>> print(y)
ivy.array([-1.4, 1.4])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1., 0., 1.]), b=ivy.array([1.1, 0.2, 1.4]))
>>> y = ivy.mean(x)
>>> print(y)
{
a: ivy.array(0.),
b: ivy.array(0.90000004)
}
>>> x = ivy.Container(a=ivy.array([[0., 1., 2.], [3., 4., 5.]]),
... b=ivy.array([[3., 4., 5.], [6., 7., 8.]]))
>>> y = ivy.Container(a = ivy.zeros(3), b = ivy.zeros(3))
>>> ivy.mean(x, axis=0, out=y)
>>> print(y)
{
a: ivy.array([1.5, 2.5, 3.5]),
b: ivy.array([4.5, 5.5, 6.5])
}
"""
return current_backend(x).mean(x, axis=axis, keepdims=keepdims, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def prod(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the product of input array x elements.
**Special Cases**
Let ``N`` equal the number of elements over which to compute the product.
- If ``N`` is ``0``, the product is ``1`` (i.e., the empty product).
    For both real-valued and complex floating-point operands, special cases
    must be handled as if the operation is implemented by successive
    application of :func:`ivy.multiply`.
Parameters
----------
x
input array. Should have a numeric data type.
axis
axis or axes along which products must be computed. By default, the product must
be computed over the entire array. If a tuple of integers, products must be
computed over multiple axes. Default: ``None``.
keepdims
bool, if True, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with the
input array (see Broadcasting). Otherwise, if False, the reduced axes
(dimensions) must not be included in the result. Default: ``False``.
dtype
data type of the returned array. If None,
if the default data type corresponding to the data type “kind” (integer or
floating-point) of x has a smaller range of values than the data type of x
(e.g., x has data type int64 and the default data type is int32, or x has data
type uint64 and the default data type is int64), the returned array must have
the same data type as x. if x has a floating-point data type, the returned array
must have the default floating-point data type. if x has a signed integer data
type (e.g., int16), the returned array must have the default integer data type.
if x has an unsigned integer data type (e.g., uint16), the returned array must
have an unsigned integer data type having the same number of bits as the default
integer data type (e.g., if the default integer data type is int32, the returned
array must have a uint32 data type). If the data type (either specified or
resolved) differs from the data type of x, the input array should be cast to the
specified data type before computing the product. Default: ``None``.
out
optional output array, for writing the result to.
Returns
-------
ret
array, if the product was computed over the entire array, a zero-dimensional
array containing the product; otherwise, a non-zero-dimensional array containing
the products. The returned array must have a data type as described by the dtype
parameter above.
This method conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.prod.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> z = ivy.prod(x)
>>> print(z)
ivy.array(6)
>>> x = ivy.array([1, 0, 3])
>>> z = ivy.prod(x)
>>> print(z)
ivy.array(0)
>>> x = ivy.array([[3., 4., 5.]])
>>> y = ivy.prod(x, keepdims=True)
>>> print(y)
ivy.array([60.])
>>> x = ivy.array([2., 1.])
>>> y = ivy.array(0.)
>>> ivy.prod(x, out=y)
>>> print(y)
ivy.array(2.)
>>> x = ivy.array([[-1., -2.], [3., 3.]])
>>> y = ivy.prod(x, axis=1)
>>> print(y)
ivy.array([2., 9.])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1., 0., 1.]), b=ivy.array([1.1, 0.2, 1.4]))
>>> y = ivy.prod(x)
>>> print(y)
{
a: ivy.array(-0.),
b: ivy.array(0.30800003)
}
>>> x = ivy.Container(a=ivy.array([[1., 2.], [3., 4.]]),
... b=ivy.array([[ 4., 5.], [5., 6.]]))
>>> y = ivy.prod(x, axis=1, keepdims=True)
>>> print(y)
{
a: ivy.array([[2.],
[12.]]),
b: ivy.array([[20.],
[30.]])
}
"""
return current_backend(x).prod(
x, axis=axis, dtype=dtype, keepdims=keepdims, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def std(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the standard deviation of the input array ``x``.
**Special Cases**
Let ``N`` equal the number of elements over which to compute the standard deviation.
- If ``N - correction`` is less than or equal to ``0``,
the standard deviation is ``NaN``.
- If ``x_i`` is ``NaN``, the standard deviation is ``NaN``
(i.e., ``NaN`` values propagate).
Parameters
----------
x
input array.
axis
axis or axes along which standard deviations must be computed. By default, the
standard deviation must be computed over the entire array. If a tuple of
integers, standard deviations must be computed over multiple axes.
Default: ``None``.
correction
degrees of freedom adjustment. Setting this parameter to a value other
than ``0`` has the effect of adjusting the divisor during the calculation of the
standard deviation according to ``N-c`` where ``N`` corresponds to the total
number of elements over which the standard deviation is computed and ``c``
corresponds to the provided degrees of freedom adjustment. When computing the
standard deviation of a population, setting this parameter to ``0`` is the
standard choice (i.e., the provided array contains data constituting an
entire population). When computing the corrected sample standard deviation,
setting this parameter to ``1`` is the standard choice (i.e., the provided array
contains data sampled from a larger population; this is commonly referred to as
Bessel's correction).
Default: ``0``.
keepdims
if ``True``, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with the
input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced axes
(dimensions) must not be included in the result. Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the standard deviation was computed over the entire array, a zero-dimensional
array containing the standard deviation; otherwise, a non-zero-dimensional array
containing the standard deviations. The returned array must have the same data
type as ``x``.
.. note::
While this specification recommends that this function only accept input
arrays having a real-valued floating-point data type, specification-compliant
array libraries may choose to accept input arrays having an integer data
type. While mixed data type promotion is implementation-defined, if the input
array ``x`` has an integer data type, the returned array must have
the default real-valued floating-point data type.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.std.html>`_
in the standard.
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
>>> x = ivy.array([-1., 0., 1.])
>>> y = ivy.std(x)
>>> print(y)
ivy.array(0.81649661)
>>> x = ivy.array([-1., 0., 1.])
>>> z = ivy.std(x, correction=1)
>>> print(z)
ivy.array(1.)
>>> x = ivy.array([[0., 4.]])
>>> y = ivy.std(x, keepdims=True)
>>> print(y)
ivy.array([[2.]])
>>> x = ivy.array([2., 1.])
>>> y = ivy.array(0.)
>>> ivy.std(x, out=y)
>>> print(y)
ivy.array(0.5)
>>> x = ivy.array([[-1., -2.], [3., 3.]])
>>> y = ivy.std(x, axis=1)
>>> print(y)
ivy.array([0.5, 0. ])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1., 0., 1.]), b=ivy.array([1.1, 0.2, 1.4]))
>>> y = x.std()
>>> print(y)
{
a: ivy.array(0.81649661),
b: ivy.array(0.509902)
}
>>> x = ivy.Container(a=ivy.array([[1., 3.], [3., 6.]]),
... b=ivy.array([[ 4., 2.], [2., 1.]]))
>>> y = x.std(axis=1, keepdims=True)
>>> print(y)
{
a: ivy.array([[1.],
[1.5]]),
b: ivy.array([[1.],
[0.5]])
}
"""
return current_backend(x).std(
x, axis=axis, correction=correction, keepdims=keepdims, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def sum(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
keepdims: Optional[bool] = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the sum of the input array x.
**Special Cases**
Let ``N`` equal the number of elements over which to compute the sum.
- If ``N`` is ``0``, the sum is ``0`` (i.e., the empty sum).
For floating-point operands,
- If ``x_i`` is ``NaN``, the sum is ``NaN`` (i.e., ``NaN`` values propagate).
    For both real-valued and complex floating-point operands, special cases must
    be handled as if the operation is implemented by successive application of
    :func:`ivy.add`.
Parameters
----------
x
Input array. Should have a numeric data type.
axis
Axis or axes along which sums must be computed. By default, the sum must be
computed over the entire array. If a tuple of integers, sums must be computed
over multiple axes. Default: ``None``.
dtype
Data type of the returned array. If ``None``,
If the default data type corresponding to the data type "kind" (integer or
floating-point) of ``x`` has a smaller range of values than the data type of
``x`` (e.g., ``x`` has data type ``int64`` and the default data type is
``int32``, or ``x`` has data type ``uint64`` and the default data type is
``int64``), the returned array must have the same data type as ``x``.
If ``x`` has a floating-point data type, the returned array must have the
default floating-point data type.
If ``x`` has a signed integer data type (e.g., ``int16``), the returned
array must have the default integer data type.
If ``x`` has an unsigned integer data type (e.g., ``uint16``), the returned
array must have an unsigned integer data type having the same number of bits
as the default integer data type (e.g., if the default integer data type is
``int32``, the returned array must have a ``uint32`` data type).
If the data type (either specified or resolved) differs from the data type of
``x``, the input array should be cast to the specified data type before
computing the sum. Default: ``None``.
    .. note::
        This keyword argument is intended to help prevent data type overflows.
keepdims
If ``True``, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with the
input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced axes
(dimensions) must not be included in the result. Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
If the sum was computed over the entire array, a zero-dimensional array
containing the sum; otherwise, an array containing the sums. The returned array
must have a data type as described by the ``dtype`` parameter above.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.sum.html>`_
in the standard.
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0.41, 0.89])
>>> y = ivy.sum(x)
>>> print(y)
ivy.array(1.3)
>>> x = ivy.array([0.5, 0.7, 2.4])
>>> y = ivy.array(0.0)
>>> ivy.sum(x, out=y)
>>> print(y)
ivy.array(3.6)
>>> x = ivy.array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.sum(x, axis = 1, keepdims = False)
>>> print(y)
ivy.array([3, 20])
>>> x = ivy.array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.array([0,0,0])
>>> ivy.sum(x, axis = 0, keepdims = False, out = y)
>>> print(y)
ivy.array([4, 7, 12])
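    The ``dtype`` argument can be used to guard against overflow in narrow
    integer types (illustrative example; the printed repr may vary slightly
    by backend):

    >>> x = ivy.array([100, 100, 100], dtype='int8')
    >>> y = ivy.sum(x, dtype='int64')
    >>> print(y)
    ivy.array(300)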
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([0.1, 0.2, 0.3, 0.3, 0.9, 0.10])
>>> y = ivy.sum(x)
>>> print(y)
ivy.array(1.9)
>>> x = ivy.native_array([1.0, 2.0, 2.0, 3.0])
>>> y = ivy.array(0.0)
>>> ivy.sum(x, out=y)
>>> print(y)
ivy.array(8.)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.sum(x)
>>> print(y)
{
a: ivy.array(3.),
b: ivy.array(12.)
}
"""
return current_backend(x).sum(x, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def var(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the variance of the input array x.
**Special Cases**
Let N equal the number of elements over which to compute the variance.
If N - correction is less than or equal to 0, the variance is NaN.
If x_i is NaN, the variance is NaN (i.e., NaN values propagate).
Parameters
----------
x
input array. Should have a floating-point data type.
axis
axis or axes along which variances must be computed. By default, the variance
must be computed over the entire array. If a tuple of integers, variances must
be computed over multiple axes. Default: ``None``.
correction
degrees of freedom adjustment. Setting this parameter to a value other than 0
has the effect of adjusting the divisor during the calculation of the variance
according to N-c where N corresponds to the total number of elements over which
the variance is computed and c corresponds to the provided degrees of freedom
adjustment. When computing the variance of a population, setting this parameter
to 0 is the standard choice (i.e., the provided array contains data constituting
an entire population). When computing the unbiased sample variance, setting this
parameter to 1 is the standard choice (i.e., the provided array contains data
sampled from a larger population; this is commonly referred to as Bessel's
correction). Default: ``0``.
keepdims
if True, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with the
input array (see Broadcasting). Otherwise, if False, the reduced axes
(dimensions) must not be included in the result. Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the variance was computed over the entire array, a zero-dimensional array
containing the variance; otherwise, a non-zero-dimensional array containing the
variances. The returned array must have the same data type as x.
This method conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.var.html>`_
in the standard.
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0.1, 0.2, 0.3, 0.3, 0.9, 0.10])
>>> y = ivy.var(x)
>>> print(y)
ivy.array(0.07472222)
>>> x = ivy.array([0.1, 0.2, 0.3, 0.3, 0.9, 0.10])
>>> y = ivy.array(0.0)
>>> ivy.var(x, out=y)
>>> print(y)
ivy.array(0.07472222)
>>> x = ivy.array([[0.1, 0.2, 0.3], [0.3, 0.9, 0.10]])
>>> print(ivy.var(x, axis=1, keepdims=True))
ivy.array([[0.00666667],
[0.11555555]])
>>> x = ivy.array([[0.1, 0.2, 0.3], [0.3, 0.9, 0.10]])
>>> y = ivy.var(x, correction=1)
>>> print(y)
ivy.array(0.08966666)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.1, 0.2, 0.9]),
... b=ivy.array([0.7, 0.1, 0.9]))
>>> y = ivy.var(x)
>>> print(y)
{
a: ivy.array(0.12666667),
b: ivy.array(0.11555555)
}
"""
return current_backend(x).var(
x, axis=axis, correction=correction, keepdims=keepdims, out=out
)
# Extra #
# ------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def cumsum(
x: Union[ivy.Array, ivy.NativeArray],
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the cumulative sum of the elements along a given axis.
Parameters
----------
x
Input array.
axis
Axis along which the cumulative sum is computed. Default is ``0``.
    exclusive
        Whether to perform the cumsum exclusively, i.e., excluding each element
        from its own partial sum so that the first element of the result is
        ``0``. Default is ``False``.
reverse
Whether to perform the cumsum from last to first element in the selected
axis. Default is ``False`` (from first to last element)
dtype
Data type of the returned array. Default is ``None``.
If None, if the default data type corresponding to the data type “kind”
(integer or floating-point) of x has a smaller range of values than the
data type of x (e.g., x has data type int64 and the default data type
is int32, or x has data type uint64 and the default data type is int64),
the returned array must have the same data type as x.
If x has a floating-point data type, the returned array must have the
default floating-point data type.
If x has a signed integer data type (e.g., int16), the returned array
must have the default integer data type.
If x has an unsigned integer data type (e.g., uint16), the returned
array must have an unsigned integer data type having the same number of
bits as the default integer data type (e.g., if the default integer data
type is int32, the returned array must have a uint32 data type).
If the data type (either specified or resolved) differs from the data type
of x, the input array should be cast to the specified data type before
computing the product.
out
Optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Array which holds the result of applying cumsum at each
original array elements along the specified axis.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 5, 2, 0])
>>> y = ivy.cumsum(x, exclusive= True, reverse=False)
>>> print(y)
ivy.array([0, 1, 6, 8])
>>> x = ivy.array([[6, 4, 2],
... [1, 3, 0]])
>>> y = ivy.zeros((2,3))
>>> ivy.cumsum(x, axis=0, exclusive=False, reverse=True, out=y)
>>> print(y)
ivy.array([[7, 7, 2],
[1, 3, 0]])
>>> x = ivy.array([[1, 5, 2],
... [4, 3, 0]])
>>> y = ivy.cumsum(x, axis=0, exclusive=True, reverse=True)
>>> print(y)
ivy.array([[4, 3, 0],
[0, 0, 0]])
>>> x = ivy.array([[2, 4, 5],
... [3, 6, 5],
... [1, 3, 10]])
>>> ivy.cumsum(x,axis=1,reverse=True, dtype='int64', out=x)
>>> print(x)
ivy.array([[11, 9, 5],
[14, 11, 5],
[14, 13, 10]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1, 3, 5]]),
... b=ivy.array([[3, 5, 7]]))
>>> y = ivy.cumsum(x, axis= 0)
>>> print(y)
{
a: ivy.array([[1, 3, 5]]),
b: ivy.array([[3, 5, 7]])
}
>>> x = ivy.Container(a=ivy.array([[1, 3, 4]]),
... b=ivy.array([[3, 5, 8],
... [5, 6, 5]]),
... c=ivy.array([[2, 4, 1],
... [3, 6, 9],
... [0, 2, 3]]))
>>> y = ivy.Container(a = ivy.zeros((1, 3)),
... b = ivy.zeros((2, 3)),
... c = ivy.zeros((3,3)))
>>> ivy.cumsum(x,axis=1,reverse=True, out=y)
>>> print(y)
{
a: ivy.array([[8, 7, 4]]),
b: ivy.array([[16, 13, 8],
[16, 11, 5]]),
c: ivy.array([[7, 5, 1],
[18, 15, 9],
[5, 5, 3]])
}
>>> x = ivy.Container(a=ivy.array([[0],
... [5]]),
... b=ivy.array([[6, 8, 7],
... [4, 2, 3]]),
... c=ivy.array([[1, 2],
... [3, 4],
... [6, 4]]))
>>> ivy.cumsum(x,axis=0,out=x)
>>> print(x)
{
a: ivy.array([[0],
[5]]),
b: ivy.array([[6, 8, 7],
[10, 10, 10]]),
c: ivy.array([[1, 2],
[4, 6],
[10, 10]])
}
"""
return current_backend(x).cumsum(x, axis, exclusive, reverse, dtype=dtype, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def cumprod(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the cumulative product of the elements along a given axis.
Parameters
----------
x
Input array.
    axis
        Axis along which the cumulative product is computed. Default is ``0``.
    exclusive
        Whether to perform the cumprod exclusively, i.e., excluding each element
        from its own partial product so that the first element of the result is
        ``1``. Default is ``False``.
reverse
Whether to perform the cumprod from last to first element in the selected
axis. Default is ``False`` (from first to last element)
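    dtype
        Data type of the returned array. Default is ``None``; when ``None``, the
        returned dtype is resolved following the same rules as described for
        :func:`ivy.cumsum` above.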
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Input array with cumulatively multiplied elements along axis.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([2, 3, 4])
>>> y = ivy.cumprod(x)
>>> print(y)
ivy.array([2, 6, 24])
>>> x = ivy.array([2, 3, 4])
>>> y = ivy.cumprod(x, exclusive=True)
>>> print(y)
ivy.array([1, 2, 6])
>>> x = ivy.array([[2, 3],[5, 7],[11, 13]])
>>> y = ivy.zeros((3, 2))
>>> ivy.cumprod(x, axis=1, exclusive=True, out=y)
>>> print(y)
ivy.array([[ 1., 2.],
[ 1., 5.],
[ 1., 11.]])
>>> x = ivy.array([[2, 3],[5, 7],[11, 13]])
>>> ivy.cumprod(x, axis=0, exclusive=True, out=x)
>>> print(x)
ivy.array([[1, 1],
[2, 3],
[10, 21]])
>>> x = ivy.array([[2, 3],[5, 7],[11, 13]])
>>> y = ivy.zeros((3, 2))
>>> x.cumprod(axis=0, exclusive=True, out=y)
>>> print(y)
ivy.array([[1., 1.],
[2., 3.],
[10., 21.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([2, 3, 4]), b=ivy.array([3, 4, 5]))
>>> y = ivy.cumprod(x)
>>> print(y)
{
a: ivy.array([2, 6, 24]),
b: ivy.array([3, 12, 60])
}
>>> x = ivy.Container(a=ivy.array([2, 3, 4]), b=ivy.array([3, 4, 5]))
>>> y = ivy.cumprod(x, exclusive=True)
>>> print(y)
{
a: ivy.array([1, 2, 6]),
b: ivy.array([1, 3, 12])
}
>>> x = ivy.Container(a=ivy.array([[2, 3],[5, 7],[11, 13]]), b=ivy.array([[3, 4],[4, 5],[5, 6]]))
>>> y = ivy.Container(a = ivy.zeros((3, 2)), b = ivy.zeros((3, 2)))
>>> ivy.cumprod(x, axis=1, exclusive=True, out=y)
>>> print(y)
{
a: ivy.array([[1, 2],
[1, 5],
[1, 11]]),
b: ivy.array([[1, 3],
[1, 4],
[1, 5]])
}
>>> x = ivy.Container(a=ivy.array([[2, 3],[5, 7],[11, 13]]), b=ivy.array([[3, 4],[4, 5],[5, 6]]))
>>> x.cumprod(axis=0, exclusive=True, out=x)
>>> print(x)
{
a: ivy.array([[1, 1],
[2, 3],
[10, 21]]),
b: ivy.array([[1, 1],
[3, 4],
[12, 20]])
}
""" # noqa: E501
return current_backend(x).cumprod(
x, axis=axis, exclusive=exclusive, reverse=reverse, dtype=dtype, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def einsum(
equation: str,
*operands: Union[ivy.Array, ivy.NativeArray],
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Sum the product of the elements of the input operands along dimensions
specified using a notation based on the Einstein summation convention.
Parameters
----------
equation
A str describing the contraction, in the same format as numpy.einsum.
operands
        sequence of arrays, the inputs to contract (each one an ivy.Array), whose
        shapes should be consistent with ``equation``.
out
optional output array, for writing the result to.
Returns
-------
ret
The array with sums computed.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
>>> y = ivy.einsum('ii', x)
>>> print(y)
ivy.array(12)
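    Here ``'ii'`` sums the diagonal elements, i.e. it computes the matrix trace.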
>>> x = ivy.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
>>> z = ivy.einsum('ij -> j', x)
>>> print(z)
ivy.array([ 9, 12, 15])
>>> A = ivy.array([0, 1, 2])
>>> B = ivy.array([[ 0, 1, 2, 3],
... [ 4, 5, 6, 7],
... [ 8, 9, 10, 11]])
>>> C = ivy.einsum('i,ij->i', A, B)
>>> print(C)
ivy.array([ 0, 22, 76])
>>> A = ivy.array([[1, 1, 1],
... [2, 2, 2],
... [5, 5, 5]])
>>> B = ivy.array([[0, 1, 0],
... [1, 1, 0],
... [1, 1, 1]])
>>> C = ivy.einsum('ij,jk->ik', A, B)
>>> print(C)
ivy.array([[ 2, 3, 1],
[ 4, 6, 2],
[10, 15, 5]])
>>> A = ivy.arange(10)
>>> B = ivy.arange(5, 15)
>>> C = ivy.einsum('i->', A)
>>> print(C)
ivy.array(45)
>>> A = ivy.arange(10)
>>> B = ivy.arange(5, 15)
>>> C = ivy.einsum('i,i->i', A, B)
>>> print(C)
ivy.array([ 0, 6, 14, 24, 36, 50, 66, 84, 104, 126])
>>> A = ivy.arange(10)
>>> B = ivy.arange(5, 15)
>>> C = ivy.einsum('i,i->', A, B) # or just use 'i,i'
>>> print(C)
ivy.array(510)
>>> A = ivy.arange(10)
>>> B = ivy.arange(5, 15)
>>> C = ivy.einsum('i,j->ij', A, B)
>>> print(C)
ivy.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
[ 10, 12, 14, 16, 18, 20, 22, 24, 26, 28],
[ 15, 18, 21, 24, 27, 30, 33, 36, 39, 42],
[ 20, 24, 28, 32, 36, 40, 44, 48, 52, 56],
[ 25, 30, 35, 40, 45, 50, 55, 60, 65, 70],
[ 30, 36, 42, 48, 54, 60, 66, 72, 78, 84],
[ 35, 42, 49, 56, 63, 70, 77, 84, 91, 98],
[ 40, 48, 56, 64, 72, 80, 88, 96, 104, 112],
[ 45, 54, 63, 72, 81, 90, 99, 108, 117, 126]])
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([0, 1, 2])
>>> y = ivy.Container(a=ivy.array([[ 0, 1, 2, 3],
... [ 4, 5, 6, 7],
... [ 8, 9, 10, 11]]),
... b=ivy.array([[ 0, 1, 2],
... [ 4, 5, 6],
... [ 8, 9, 10]]))
>>> z = ivy.einsum('i,ij->i', x, y)
>>> print(z)
{
a: ivy.array([0, 22, 76]),
b: ivy.array([0, 15, 54])
}
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[0, 1, 0],[1, 1, 0],[1, 1, 1]]),
... b=ivy.array([[0, 1, 2],[4, 5, 6],[8, 9, 10]]))
>>> y = ivy.einsum('ii', x)
>>> print(y)
{
a: ivy.array(2),
b: ivy.array(15)
}
"""
return current_backend(operands[0]).einsum(equation, *operands, out=out)
| ivy/ivy/functional/ivy/statistical.py/0 | {
"file_path": "ivy/ivy/functional/ivy/statistical.py",
"repo_id": "ivy",
"token_count": 18756
} | 50 |
# re-export the public backend handler utilities at the package level
from . import handler
from .handler import *
| ivy/ivy/utils/backend/__init__.py/0 | {
"file_path": "ivy/ivy/utils/backend/__init__.py",
"repo_id": "ivy",
"token_count": 11
} | 51 |
try:
    import jax

    # enable 64-bit precision in jax, which defaults to 32-bit
    jax.config.update("jax_enable_x64", True)
except (ImportError, RuntimeError):
    # jax is an optional test dependency; skip the tweak when it is unavailable
    pass
| ivy/ivy_tests/__init__.py/0 | {
"file_path": "ivy/ivy_tests/__init__.py",
"repo_id": "ivy",
"token_count": 46
} | 52 |
# global
import numpy as np
from hypothesis import strategies as st
from typing import Optional
# local
import ivy
from ..pipeline_helper import BackendHandler, get_frontend_config
from . import number_helpers as nh
from . import array_helpers as ah
from .. import globals as test_globals
from ..globals import mod_backend
_dtype_kind_keys = {
"valid",
"numeric",
"float",
"unsigned",
"integer",
"signed_integer",
"complex",
"real_and_complex",
"float_and_integer",
"float_and_complex",
"bool",
}
def _get_fn_dtypes(framework: str, kind="valid", mixed_fn_dtypes="compositional"):
all_devices_dtypes = test_globals.CURRENT_RUNNING_TEST.supported_device_dtypes[
framework
]
if mixed_fn_dtypes in all_devices_dtypes:
all_devices_dtypes = all_devices_dtypes[mixed_fn_dtypes]
return all_devices_dtypes[test_globals.CURRENT_DEVICE_STRIPPED][kind]
def _get_type_dict(framework: str, kind: str, is_frontend_test=False):
if mod_backend[framework]:
proc, input_queue, output_queue = mod_backend[framework]
input_queue.put(("_get_type_dict_helper", framework, kind, is_frontend_test))
return output_queue.get()
else:
return _get_type_dict_helper(framework, kind, is_frontend_test)
def _get_type_dict_helper(framework, kind, is_frontend_test):
if is_frontend_test:
framework_module = get_frontend_config(framework).supported_dtypes
elif ivy.current_backend_str() == framework:
framework_module = ivy
else:
with BackendHandler.update_backend(framework) as ivy_backend:
framework_module = ivy_backend
if kind == "valid":
return framework_module.valid_dtypes
if kind == "numeric":
return framework_module.valid_numeric_dtypes
if kind == "integer":
return framework_module.valid_int_dtypes
if kind == "float":
return framework_module.valid_float_dtypes
if kind == "unsigned":
return framework_module.valid_uint_dtypes
if kind == "signed_integer":
return tuple(
set(framework_module.valid_int_dtypes).difference(
framework_module.valid_uint_dtypes
)
)
if kind == "complex":
return framework_module.valid_complex_dtypes
if kind == "real_and_complex":
return tuple(
set(framework_module.valid_numeric_dtypes).union(
framework_module.valid_complex_dtypes
)
)
if kind == "float_and_complex":
return tuple(
set(framework_module.valid_float_dtypes).union(
framework_module.valid_complex_dtypes
)
)
if kind == "float_and_integer":
return tuple(
set(framework_module.valid_float_dtypes).union(
framework_module.valid_int_dtypes
)
)
if kind == "bool":
return tuple(
set(framework_module.valid_dtypes).difference(
framework_module.valid_numeric_dtypes
)
)
raise RuntimeError(f"{kind} is an unknown kind!")
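# Illustrative (not executed): _get_type_dict_helper("numpy", "signed_integer",
# False) is expected to return the numpy backend's valid int dtypes minus its
# uint dtypes, e.g. some ordering of ("int8", "int16", "int32", "int64").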
@st.composite
def get_dtypes(
draw,
kind="valid",
index=0,
mixed_fn_compos=True,
full=True,
none=False,
key=None,
prune_function=True,
):
"""Draws a valid dtypes for the test function. For frontend tests, it draws
    the data types from the intersection between backend framework data types
    and frontend framework dtypes; otherwise, it draws them from the backend
    framework data types.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
kind
        Supported kinds are valid, numeric, integer, float, unsigned,
        signed_integer, complex, real_and_complex, float_and_integer,
        float_and_complex, and bool.
index
list indexing in case a test needs to be skipped for a particular dtype(s)
mixed_fn_compos
boolean if True, the function will return the dtypes of the compositional
implementation for mixed partial functions and if False, it will return
the dtypes of the primary implementation.
full
returns the complete list of valid types
none
allow none in the list of valid types
key
if provided, a shared value will be drawn from the strategy and passed to the
function as the keyword argument with the given name.
prune_function
if True, the function will prune the data types to only include the ones that
are supported by the current function. If False, the function will return all
the data types supported by the current backend.
Returns
-------
ret
A strategy that draws dtype strings
Examples
--------
>>> get_dtypes()
['float16',
'uint8',
'complex128',
'bool',
'uint32',
'float64',
'int8',
'int16',
'complex64',
'float32',
'int32',
'uint16',
'int64',
'uint64']
>>> get_dtypes(kind='valid', full=False)
['int16']
>>> get_dtypes(kind='valid', full=False)
['uint16']
>>> get_dtypes(kind='numeric', full=False)
['complex64']
>>> get_dtypes(kind='float', full=False, key="leaky_relu")
['float16']
>>> get_dtypes(kind='float', full=False, key="searchsorted")
['bfloat16']
>>> get_dtypes(kind='float', full=False, key="dtype")
['float32']
>>> get_dtypes("numeric", prune_function=False)
['int16']
>>> get_dtypes("valid", prune_function=False)
['uint32']
>>> get_dtypes("valid", prune_function=False)
['complex128']
>>> get_dtypes("valid", prune_function=False)
['bool']
>>> get_dtypes("valid", prune_function=False)
['float16']
"""
mixed_fn_dtypes = "compositional" if mixed_fn_compos else "primary"
if prune_function:
retrieval_fn = _get_fn_dtypes
if test_globals.CURRENT_RUNNING_TEST is not test_globals._Notsetval:
valid_dtypes = set(
retrieval_fn(
test_globals.CURRENT_BACKEND,
mixed_fn_dtypes=mixed_fn_dtypes,
kind=kind,
)
)
else:
raise RuntimeError(
"No function is set to prune, calling "
"prune_function=True without a function is redundant."
)
else:
retrieval_fn = _get_type_dict
valid_dtypes = set(retrieval_fn(test_globals.CURRENT_BACKEND, kind))
# The function may be called from a frontend test or an Ivy API test
# In the case of an Ivy API test, the function should make sure it returns a valid
# dtypes for the backend and also for the ground truth backend, if it is called from
# a frontend test, we should also count for the frontend support data types
# In conclusion, the following operations will get the intersection of
# FN_DTYPES & BACKEND_DTYPES & FRONTEND_DTYPES & GROUND_TRUTH_DTYPES
# If being called from a frontend test
if test_globals.CURRENT_FRONTEND is not test_globals._Notsetval:
frontend_dtypes = _get_type_dict_helper(
test_globals.CURRENT_FRONTEND, kind, True
)
valid_dtypes = valid_dtypes.intersection(frontend_dtypes)
# Make sure we return dtypes that are compatible with ground truth backend
ground_truth_is_set = (
test_globals.CURRENT_GROUND_TRUTH_BACKEND is not test_globals._Notsetval # NOQA
)
if ground_truth_is_set:
valid_dtypes = valid_dtypes.intersection(
retrieval_fn(test_globals.CURRENT_GROUND_TRUTH_BACKEND, kind=kind)
)
valid_dtypes = list(valid_dtypes)
if none:
valid_dtypes.append(None)
if full:
return valid_dtypes[index:]
if key is None:
return [draw(st.sampled_from(valid_dtypes[index:]))]
return [draw(st.shared(st.sampled_from(valid_dtypes[index:]), key=key))]
@st.composite
def array_dtypes(
draw,
*,
num_arrays=st.shared(nh.ints(min_value=1, max_value=4), key="num_arrays"),
available_dtypes=get_dtypes("valid"),
shared_dtype=False,
array_api_dtypes=False,
):
"""Draws a list of data types.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
num_arrays
number of data types to be drawn.
available_dtypes
universe of available data types.
shared_dtype
if True, all data types in the list are same.
array_api_dtypes
if True, use data types that can be promoted with the array_api_promotion
table.
Returns
-------
A strategy that draws a list of data types.
Examples
--------
>>> array_dtypes(
... available_dtypes=get_dtypes("numeric"),
... shared_dtype=True,
... )
['float64']
>>> array_dtypes(
... available_dtypes=get_dtypes("numeric"),
... shared_dtype=True,
... )
['int8', 'int8']
>>> array_dtypes(
... available_dtypes=get_dtypes("numeric"),
... shared_dtype=True,
... )
['int32', 'int32', 'int32', 'int32']
>>> array_dtypes(
... num_arrays=5,
... available_dtypes=get_dtypes("valid"),
... shared_dtype=False,
... )
['int8', 'float64', 'complex64', 'int8', 'bool']
>>> array_dtypes(
... num_arrays=5,
... available_dtypes=get_dtypes("valid"),
... shared_dtype=False,
... )
['bool', 'complex64', 'bool', 'complex64', 'bool']
>>> array_dtypes(
... num_arrays=5,
... available_dtypes=get_dtypes("valid"),
... shared_dtype=False,
... )
['float64', 'int8', 'float64', 'int8', 'float64']
"""
if isinstance(available_dtypes, st._internal.SearchStrategy):
available_dtypes = draw(available_dtypes)
if not isinstance(num_arrays, int):
num_arrays = draw(num_arrays)
if num_arrays == 1:
dtypes = draw(
ah.list_of_size(
x=st.sampled_from(available_dtypes),
size=1,
)
)
elif shared_dtype:
dtypes = draw(
ah.list_of_size(
x=st.sampled_from(available_dtypes),
size=1,
)
)
dtypes = [dtypes[0] for _ in range(num_arrays)]
else:
unwanted_types = set(ivy.all_dtypes).difference(set(available_dtypes))
if array_api_dtypes:
pairs = ivy.array_api_promotion_table.keys()
else:
pairs = ivy.promotion_table.keys()
        # keep only the promotion pairs whose dtypes are all available, so that
        # e.g. complex dtypes are never sampled when they are unavailable
        available_dtypes = [
            pair for pair in pairs if not any(d in pair for d in unwanted_types)
        ]
dtypes = list(draw(st.sampled_from(available_dtypes)))
if num_arrays > 2:
dtypes += [dtypes[i % 2] for i in range(num_arrays - 2)]
return dtypes
@st.composite
def get_castable_dtype(draw, available_dtypes, dtype: str, x: Optional[list] = None):
"""Draws castable dtypes for the given dtype based on the current backend.
Parameters
----------
draw
Special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
available_dtypes
Castable data types are drawn from this list randomly.
dtype
Data type from which to cast.
x
Optional list of values to cast.
Returns
-------
ret
A tuple of inputs and castable dtype.
"""
cast_dtype = draw(
st.sampled_from(available_dtypes).filter(
lambda value: cast_filter(value, dtype=dtype, x=x)
)
)
if x is None:
return dtype, cast_dtype
return dtype, x, cast_dtype
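# Illustrative (not executed): get_castable_dtype(available, "float32",
# x=[1.0, 2.0]) may draw ("float32", [1.0, 2.0], "float64"), since float64 can
# safely represent every float32 value appearing in x.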
def cast_filter(d, dtype, x):
if mod_backend[test_globals.CURRENT_BACKEND]:
proc, input_queue, output_queue = mod_backend[test_globals.CURRENT_BACKEND]
input_queue.put(
("cast_filter_helper", d, dtype, x, test_globals.CURRENT_BACKEND)
)
return output_queue.get()
else:
return cast_filter_helper(d, dtype, x, test_globals.CURRENT_BACKEND)
def cast_filter_helper(d, dtype, x, current_backend):
with BackendHandler.update_backend(current_backend) as ivy_backend:
def bound_dtype_bits(d):
return (
ivy_backend.dtype_bits(d) / 2
if ivy_backend.is_complex_dtype(d)
else ivy_backend.dtype_bits(d)
)
if ivy_backend.is_int_dtype(d):
max_val = ivy_backend.iinfo(d).max
min_val = ivy_backend.iinfo(d).min
elif ivy_backend.is_float_dtype(d) or ivy_backend.is_complex_dtype(d):
max_val = ivy_backend.finfo(d).max
min_val = ivy_backend.finfo(d).min
else:
max_val = 1
min_val = -1
if x is None:
if ivy_backend.is_int_dtype(dtype):
max_x = ivy_backend.iinfo(dtype).max
min_x = ivy_backend.iinfo(dtype).min
elif ivy_backend.is_float_dtype(dtype) or ivy_backend.is_complex_dtype(
dtype
):
max_x = ivy_backend.finfo(dtype).max
min_x = ivy_backend.finfo(dtype).min
else:
max_x = 1
min_x = -1
else:
max_x = np.max(np.asarray(x))
min_x = np.min(np.asarray(x))
return (
max_x <= max_val
and min_x >= min_val
and bound_dtype_bits(d) >= bound_dtype_bits(dtype)
and (
ivy_backend.is_complex_dtype(d)
or not ivy_backend.is_complex_dtype(dtype)
)
and (min_x > 0 or not ivy_backend.is_uint_dtype(dtype))
)
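# Illustrative (not executed): with the numpy backend,
# cast_filter_helper("float64", "float32", None, "numpy") should return True,
# since float64 has at least as many bits as float32 and covers its value
# range, whereas cast_filter_helper("int8", "float32", None, "numpy") should
# return False.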
| ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/dtype_helpers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/dtype_helpers.py",
"repo_id": "ivy",
"token_count": 6349
} | 53 |
from .base import FrontendConfig, SupportedDeviecs, SupportedDtypes
# scipy historically re-exported numpy's array/dtype aliases, but they were
# removed from modern SciPy releases, so numpy is used directly below
import numpy as np
def get_config():
return ScipyFrontendConfig()
class ScipyFrontendConfig(FrontendConfig):
    Dtype = np.dtype
Device = str
    valid_devices = ("cpu",)
invalid_devices = ("tpu", "gpu")
valid_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"float16",
"float32",
"float64",
"complex64",
"complex128",
"bool",
]
invalid_dtypes = [
"uint16",
"uint32",
"uint64",
"bfloat16",
]
valid_numeric_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"float16",
"float32",
"float64",
"complex64",
"complex128",
]
invalid_numeric_dtypes = [
"uint16",
"uint32",
"uint64",
"bfloat16",
]
valid_int_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
]
invalid_int_dtypes = [
"uint16",
"uint32",
"uint64",
]
valid_uint_dtypes = [
"uint8",
]
invalid_uint_dtypes = [
"uint16",
"uint32",
"uint64",
]
valid_float_dtypes = [
"float16",
"float32",
"float64",
]
invalid_float_dtypes = [
"bfloat16",
]
valid_complex_dtypes = [
"complex64",
"complex128",
]
invalid_complex_dtypes = []
@property
def supported_devices(self):
return SupportedDeviecs(
valid_devices=self.valid_devices, invalid_devices=self.invalid_devices
)
@property
def supported_dtypes(self):
return SupportedDtypes(
valid_dtypes=self.valid_dtypes,
invalid_dtypes=self.invalid_dtypes,
valid_numeric_dtypes=self.valid_numeric_dtypes,
invalid_numeric_dtypes=self.invalid_numeric_dtypes,
valid_int_dtypes=self.valid_int_dtypes,
invalid_int_dtypes=self.invalid_int_dtypes,
valid_uint_dtypes=self.valid_uint_dtypes,
invalid_uint_dtypes=self.invalid_uint_dtypes,
valid_float_dtypes=self.valid_float_dtypes,
invalid_float_dtypes=self.invalid_float_dtypes,
valid_complex_dtypes=self.valid_complex_dtypes,
invalid_complex_dtypes=self.invalid_complex_dtypes,
)
def native_array(self, x):
        return np.array(x)
def is_native_array(self, x):
        return isinstance(x, np.ndarray)
def to_numpy(self, x):
return x
def as_native_dtype(self, dtype: str):
        return np.dtype(dtype)
def as_native_device(self, device: str):
return device
def isscalar(self, x):
return np.isscalar(x)
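# Minimal usage sketch (assuming this module is resolved through the test
# pipeline's get_frontend_config("scipy")):
#     config = get_config()
#     config.native_array([1, 2, 3])      # -> np.array([1, 2, 3])
#     config.as_native_dtype("float32")   # -> np.dtype("float32")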
| ivy/ivy_tests/test_ivy/test_frontends/config/scipy.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/scipy.py",
"repo_id": "ivy",
"token_count": 1505
} | 54 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@handle_frontend_test(
fn_tree="jax.lax.stop_gradient",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_stop_gradient(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_custom_gradient_operators.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_custom_gradient_operators.py",
"repo_id": "ivy",
"token_count": 347
} | 55 |
# global
from hypothesis import strategies as st, assume
import numpy as np
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
_get_castable_dtype,
)
from ivy import inf
# --- Helpers --- #
# --------------- #
@st.composite
def _get_array_axes_probs(draw):
array_dtypes = draw(helpers.get_dtypes(kind="float"))
array_dtype, array, axes = draw(
helpers.dtype_values_axis(
available_dtypes=array_dtypes,
small_abs_safety_factor=5,
large_abs_safety_factor=5,
min_num_dims=1,
max_num_dims=5,
max_dim_size=7,
max_axes_size=5,
valid_axis=True,
force_int_axis=True,
min_value=1,
max_value=300,
)
)
q = np.round(
np.array(
draw(
helpers.lists(
x=helpers.floats(
min_value=0,
max_value=1,
small_abs_safety_factor=50,
large_abs_safety_factor=50,
safety_factor_scale="log",
abs_smallest_val=1e-1,
mixed_fn_compos=False,
),
min_size=1,
max_size=10,
)
)
),
decimals=3,
)
return array_dtype, array, axes, q
# nanmean
@st.composite
def _get_castable_dtype_with_nan(draw):
available_dtypes = helpers.get_dtypes("float")
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=4, max_dim_size=6))
dtype, values = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
num_arrays=1,
large_abs_safety_factor=6,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
allow_nan=True,
allow_inf=True,
)
)
axis = draw(helpers.get_axis(shape=shape, force_int=True))
dtype1, values, dtype2 = draw(
helpers.get_castable_dtype(draw(available_dtypes), dtype[0], values[0])
)
return dtype1, [values], axis, dtype2
@st.composite
def _get_castable_dtypes_values(draw, *, allow_nan=False, use_where=False):
available_dtypes = helpers.get_dtypes("numeric")
shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=4, max_dim_size=6))
dtype, values = draw(
helpers.dtype_and_values(
available_dtypes=available_dtypes,
num_arrays=1,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
shape=shape,
allow_nan=allow_nan,
)
)
axis = draw(helpers.get_axis(shape=shape, force_int=True))
dtype1, values, dtype2 = draw(
helpers.get_castable_dtype(draw(available_dtypes), dtype[0], values[0])
)
if use_where:
where = draw(np_helpers.where(shape=shape))
return [dtype1], [values], axis, dtype2, where
return [dtype1], [values], axis, dtype2
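# Illustrative draw (not executed): with use_where=True this strategy may
# yield e.g. (["int32"], [values], 0, "int64", where), where the second dtype
# is always safely castable from the first.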
# cov
@st.composite
def _get_dtype_value1_value2_cov(
draw,
available_dtypes,
min_num_dims=1,
max_num_dims=2,
min_dim_size=2,
max_dim_size=3,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
large_abs_safety_factor=50,
small_abs_safety_factor=50,
safety_factor_scale="log",
):
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
dtype = draw(st.sampled_from(available_dtypes))
values = []
for i in range(2):
values.append(
draw(
helpers.array_values(
dtype=dtype,
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
value1, value2 = values[0], values[1]
# modifiers: rowVar, bias, ddof
rowVar = draw(st.booleans())
bias = draw(st.booleans())
ddof = draw(helpers.ints(min_value=0, max_value=1))
    # index of the shape entry that gives the number of observations, which
    # the aweights array drawn below must match in length
    if rowVar is False:
        numVals = 0
    else:
        numVals = 0 if len(shape) == 1 else -1
fweights = None
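    # aweights is drawn with strictly positive entries so the weighted
    # covariance computed in the jax.numpy.cov test stays well-conditioned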
aweights = draw(
helpers.array_values(
dtype=dtype,
shape=shape[numVals],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
small_abs_safety_factor=1,
)
)
return [dtype], value1, value2, rowVar, bias, ddof, fweights, aweights
@st.composite
def _percentile_helper(draw):
large_abs_safety_factor = 2
small_abs_safety_factor = 2
dtype, values, axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale="log",
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
valid_axis=True,
allow_neg_axes=False,
min_axes_size=1,
force_int_axis=True,
)
)
q = draw(
st.one_of(
helpers.array_values(
dtype=helpers.get_dtypes("float"),
shape=helpers.get_shape(min_dim_size=1, max_num_dims=1, min_num_dims=1),
min_value=0.0,
max_value=100.0,
exclude_max=False,
exclude_min=False,
),
st.floats(min_value=0.0, max_value=100.0),
)
)
interpolation_names = [
"linear",
"lower",
"higher",
"midpoint",
"nearest",
]
interpolation = draw(
helpers.list_of_size(
x=st.sampled_from(interpolation_names),
size=1,
)
)
return dtype, values, axis, interpolation, q
# --- Main --- #
# ------------ #
# argmin
@handle_frontend_test(
fn_tree="jax.numpy.argmin",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
force_int_axis=True,
min_num_dims=1,
valid_axis=True,
),
keepdims=st.booleans(),
)
def test_jax_argmin(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
)
# average
@handle_frontend_test(
fn_tree="jax.numpy.average",
dtype_x_axis=helpers.dtype_values_axis(
num_arrays=2,
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
valid_axis=True,
allow_neg_axes=False,
min_axes_size=1,
),
returned=st.booleans(),
)
def test_jax_average(
*,
dtype_x_axis,
returned,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x, axis = dtype_x_axis
if isinstance(axis, tuple):
axis = axis[0]
np_helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
atol=2e-2,
rtol=2e-2,
a=x[0],
axis=axis,
weights=x[1],
returned=returned,
)
# bincount
@handle_frontend_test(
fn_tree="jax.numpy.bincount",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=1,
max_value=2,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
),
key="a_s_d",
),
),
test_with_out=st.just(False),
)
def test_jax_bincount(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
weights=None,
minlength=0,
length=None,
)
@handle_frontend_test(
fn_tree="jax.numpy.corrcoef",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
num_arrays=2,
shared_dtype=True,
abs_smallest_val=1e-5,
min_num_dims=2,
max_num_dims=2,
min_dim_size=3,
max_dim_size=3,
min_value=-100,
max_value=100,
),
rowvar=st.booleans(),
)
def test_jax_corrcoef(
dtype_and_x,
rowvar,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
rowvar=rowvar,
)
# correlate
@handle_frontend_test(
fn_tree="jax.numpy.correlate",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
min_value=-1e04,
max_value=1e04,
shared_dtype=True,
),
mode=st.sampled_from(["valid", "same", "full"]),
)
def test_jax_correlate(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
mode,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-4,
atol=1e-4,
on_device=on_device,
a=x[0],
v=x[1],
mode=mode,
)
@handle_frontend_test(
fn_tree="jax.numpy.cov",
dtypes_args=_get_dtype_value1_value2_cov(available_dtypes=["float64"]),
test_with_out=st.just(False),
)
def test_jax_cov(
*,
dtypes_args,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
dtype, value1, value2, rowvar, bias, ddof, fweights, aweights = dtypes_args
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-3,
atol=1e-3,
on_device=on_device,
m=value1,
y=value2,
rowvar=rowvar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
)
# cumprod
@handle_frontend_test(
fn_tree="jax.numpy.cumprod",
# aliases=["jax.numpy.cumproduct"], deprecated since 0.4.12
dtype_x_axis=_get_castable_dtype(),
test_with_out=st.just(False),
)
def test_jax_cumprod(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, dtype = dtype_x_axis
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=[input_dtype],
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
a=x[0],
axis=axis,
dtype=dtype,
)
# cumsum
@handle_frontend_test(
fn_tree="jax.numpy.cumsum",
dtype_x_axis=_get_castable_dtype(),
test_with_out=st.just(False),
)
def test_jax_cumsum(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis, dtype = dtype_x_axis
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=[input_dtype],
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype,
)
# einsum
@handle_frontend_test(
fn_tree="jax.numpy.einsum",
eq_n_op=st.sampled_from(
[
(
"ii",
np.arange(25).reshape(5, 5),
),
(
"ii->i",
np.arange(25).reshape(5, 5),
),
("ij,j", np.arange(25).reshape(5, 5), np.arange(5)),
]
),
dtype=helpers.get_dtypes("float", full=False),
)
def test_jax_einsum(
*,
eq_n_op,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
    # pass the equation and operands positionally as x0, x1, ...
    kw = {}
    for i, x_ in enumerate(eq_n_op):
        kw[f"x{i}"] = x_
    test_flags.num_positional_args = len(eq_n_op)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**kw,
out=None,
optimize="optimal",
precision=None,
_use_xeinsum=False,
)
# max
@handle_frontend_test(
fn_tree="jax.numpy.max",
aliases=["jax.numpy.amax"],
dtype_x_axis=_statistical_dtype_values(function="max"),
where=np_helpers.where(),
keepdims=st.booleans(),
)
def test_jax_max(
*,
dtype_x_axis,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
where=where,
)
# mean
@handle_frontend_test(
fn_tree="jax.numpy.mean",
dtype_x_axis=_statistical_dtype_values(function="mean"),
dtype=helpers.get_dtypes("float", full=False, none=True),
where=np_helpers.where(),
keepdims=st.booleans(),
)
def test_jax_mean(
*,
dtype_x_axis,
dtype,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-1,
rtol=1e-1,
a=x[0],
axis=axis,
dtype=dtype[0],
out=None,
keepdims=keepdims,
where=where,
)
# median
@handle_frontend_test(
fn_tree="jax.numpy.median",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_value=-(2**10),
max_value=2**10,
valid_axis=True,
),
keepdims=st.booleans(),
)
def test_jax_median(
*,
dtype_x_axis,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
overwrite_input=False,
keepdims=keepdims,
atol=1e-3,
rtol=1e-3,
)
# min
@handle_frontend_test(
fn_tree="jax.numpy.min",
aliases=["jax.numpy.amin"],
dtype_x_axis=_statistical_dtype_values(function="min"),
where=np_helpers.where(),
keepdims=st.booleans(),
)
def test_jax_min(
*,
dtype_x_axis,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
where=where,
)
# nancumprod
@handle_frontend_test(
fn_tree="jax.numpy.nancumprod",
dtype_and_x_axis_dtype=_get_castable_dtypes_values(allow_nan=True),
)
def test_jax_nancumprod(
dtype_and_x_axis_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype = dtype_and_x_axis_dtype
if ivy.current_backend_str() == "torch":
assume(not test_flags.as_variable[0])
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype,
)
# nancumsum
@handle_frontend_test(
fn_tree="jax.numpy.nancumsum",
dtype_and_x_axis_dtype=_get_castable_dtypes_values(allow_nan=True),
)
def test_jax_nancumsum(
dtype_and_x_axis_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, x, axis, dtype = dtype_and_x_axis_dtype
if ivy.current_backend_str() == "torch":
assume(not test_flags.as_variable[0])
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype,
)
# nanmax
@handle_frontend_test(
fn_tree="jax.numpy.nanmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
allow_nan=True,
allow_inf=True,
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_helpers.where(),
)
def test_jax_nanmax(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # an explicit initial value is required whenever the where mask is not
    # all-True, so reject draws that provide neither
    if initial is None and not np.all(where):
        assume(False)
input_dtypes, x, axis = dtype_x_axis
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
initial=initial,
where=where,
)
@handle_frontend_test(
fn_tree="jax.numpy.nanmean",
dtype_x_axis_castable_dtype=_get_castable_dtype_with_nan(),
keepdims=st.booleans(),
where=np_helpers.where(),
)
def test_jax_nanmean(
dtype_x_axis_castable_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
keepdims,
):
input_dtypes, x, axis, castable_dtype = dtype_x_axis_castable_dtype
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=[input_dtypes],
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
on_device=on_device,
fn_tree=fn_tree,
a=x[0],
axis=axis,
dtype=castable_dtype,
out=None,
keepdims=keepdims,
where=where,
)
# nanmedian
@handle_frontend_test(
fn_tree="jax.numpy.nanmedian",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
min_value=-(2**10),
max_value=2**10,
valid_axis=True,
),
keepdims=st.booleans(),
)
def test_jax_nanmedian(
on_device,
frontend,
dtype_x_axis,
keepdims,
fn_tree,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
    # TODO: draw overwrite_input as a boolean once there is a way around
    # jax.numpy.nanquantile not supporting overwrite_input=True
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
overwrite_input=False,
keepdims=keepdims,
)
# nanmin
@handle_frontend_test(
fn_tree="jax.numpy.nanmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float", full=False),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
allow_nan=True,
allow_inf=True,
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_helpers.where(),
)
def test_jax_nanmin(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # an explicit initial value is required whenever the where mask is not
    # all-True, so reject draws that provide neither
    if initial is None and not np.all(where):
        assume(False)
input_dtypes, x, axis = dtype_x_axis
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
initial=initial,
where=where,
)
# nanstd
@handle_frontend_test(
fn_tree="jax.numpy.nanstd",
dtype_and_a=_statistical_dtype_values(function="nanstd"),
dtype=helpers.get_dtypes("float", full=False, none=True),
where=np_helpers.where(),
keep_dims=st.booleans(),
)
def test_jax_nanstd(
dtype_and_a,
dtype,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keep_dims,
):
input_dtypes, a, axis, correction = dtype_and_a
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
assume(np.dtype(dtype[0]) >= np.dtype(input_dtypes[0]))
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
axis=axis,
dtype=dtype[0],
out=None,
ddof=correction,
keepdims=keep_dims,
where=where,
atol=1e-2,
rtol=1e-2,
)
# nanvar
@handle_frontend_test(
fn_tree="jax.numpy.nanvar",
dtype_x_axis=_statistical_dtype_values(function="nanvar").filter(
lambda x: x[0][0] != "bfloat16"
),
dtype=helpers.get_dtypes("float", full=False, none=True).filter(
lambda x: x != "bfloat16"
),
where=np_helpers.where(),
keepdims=st.booleans(),
)
def test_jax_nanvar(
*,
dtype_x_axis,
dtype,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis, ddof = dtype_x_axis
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
assume(np.dtype(dtype[0]) >= np.dtype(input_dtypes[0]))
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype[0],
out=None,
ddof=ddof,
keepdims=keepdims,
where=where,
atol=1e-3,
rtol=1e-3,
)
# ptp
@handle_frontend_test(
fn_tree="jax.numpy.ptp",
dtype_and_x_axis_dtype=_get_castable_dtypes_values(),
keep_dims=st.booleans(),
)
def test_jax_ptp(
dtype_and_x_axis_dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keep_dims,
):
input_dtypes, x, axis, dtype = dtype_and_x_axis_dtype
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keep_dims,
)
@handle_frontend_test(
fn_tree="jax.numpy.quantile",
dtype_array_axes_q=_get_array_axes_probs(),
overwrite_input=st.just(False),
keepdims=st.booleans(),
method=st.sampled_from(["linear", "lower", "higher", "midpoint", "nearest"]),
)
def test_jax_quantile(
*,
dtype_array_axes_q,
overwrite_input,
keepdims,
method,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, array, axes, q = dtype_array_axes_q
helpers.test_frontend_function(
input_dtypes=dtypes,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
a=array[0],
q=q,
axis=axes,
overwrite_input=overwrite_input,
method=method,
keepdims=keepdims,
)
# std
@handle_frontend_test(
fn_tree="jax.numpy.std",
dtype_x_axis=_statistical_dtype_values(function="std"),
dtype=helpers.get_dtypes("float", full=False, none=True),
where=np_helpers.where(),
keepdims=st.booleans(),
)
def test_jax_std(
*,
dtype_x_axis,
dtype,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis, ddof = dtype_x_axis
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype[0],
out=None,
ddof=ddof,
keepdims=keepdims,
where=where,
atol=1e-3,
rtol=1e-3,
)
# sum
@handle_frontend_test(
fn_tree="jax.numpy.sum",
dtype_x_axis_castable=_get_castable_dtype(),
initial=st.none() | st.floats(-10.0, 10.0),
where=np_helpers.where(),
keepdims=st.booleans(),
)
def test_jax_sum(
*,
dtype_x_axis_castable,
initial,
where,
keepdims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis, castable_dtype = dtype_x_axis_castable
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=[input_dtypes],
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
atol=1e-2,
a=x[0],
axis=axis,
dtype=castable_dtype,
out=None,
keepdims=keepdims,
initial=initial,
where=where,
backend_to_test=backend_fw,
)
# var
@handle_frontend_test(
fn_tree="jax.numpy.var",
dtype_x_axis=_statistical_dtype_values(function="var").filter(
lambda x: x[0][0] != "bfloat16"
),
dtype=helpers.get_dtypes("float", full=False, none=True).filter(
lambda x: x != "bfloat16"
),
where=np_helpers.where(),
keepdims=st.booleans(),
)
def test_jax_var(
*,
dtype_x_axis,
dtype,
keepdims,
where,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, axis, ddof = dtype_x_axis
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
dtype=dtype[0],
out=None,
ddof=ddof,
keepdims=keepdims,
where=where,
atol=1e-3,
rtol=1e-3,
)
# nanpercentile
@handle_frontend_test(
    fn_tree="jax.numpy.nanpercentile",
    dtype_and_x=_percentile_helper(),
    keep_dims=st.booleans(),
    test_with_out=st.just(False),
)
def test_jax_nanpercentile(
    *, dtype_and_x, keep_dims, test_flags, backend_fw, frontend, fn_tree, on_device
):
    input_dtype, x, axis, interpolation, q = dtype_and_x
    helpers.test_frontend_function(
        input_dtypes=input_dtype,
        test_flags=test_flags,
        backend_to_test=backend_fw,
        frontend=frontend,
        fn_tree=fn_tree,
        on_device=on_device,
        a=x[0],
        q=q,
        axis=axis,
        interpolation=interpolation[0],
        keepdims=keep_dims,
    )
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_statistical.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_statistical.py",
"repo_id": "ivy",
"token_count": 17139
} | 56 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
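# array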
@handle_frontend_test(
fn_tree="numpy.array",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_numpy_array(
dtype_and_a,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
object=a,
dtype=dtype[0],
)
# asarray
@handle_frontend_test(
fn_tree="numpy.asarray",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_with_out=st.just(False),
)
def test_numpy_asarray(
dtype_and_a,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a,
dtype=dtype[0],
)
# copy
@handle_frontend_test(
fn_tree="numpy.copy",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_with_out=st.just(False),
test_with_copy=st.just(True),
)
def test_numpy_copy(
dtype_and_a,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
)
# frombuffer
@handle_frontend_test(
fn_tree="numpy.frombuffer",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
test_with_out=st.just(False),
)
def test_numpy_frombuffer(
dtype_and_a,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
buffer=a,
dtype=dtype[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_from_existing_data.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_from_existing_data.py",
"repo_id": "ivy",
"token_count": 1648
} | 57 |
# global
from hypothesis import strategies as st
# local
import ivy
from ivy.functional.frontends.numpy.ma.MaskedArray import MaskedArray
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _array_mask(draw):
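    """Draw a random dtype and a matching (data, boolean-mask) pair of values."""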
dtype = draw(helpers.get_dtypes("valid", prune_function=False, full=False))
dtypes, x_mask = draw(
helpers.dtype_and_values(
num_arrays=2,
dtype=[dtype[0], "bool"],
)
)
return dtype[0], x_mask
# --- Main --- #
# ------------ #
# data
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_array_mask(),
)
def test_numpy_data(
args,
):
dtype, data = args
x = MaskedArray(data[0], mask=data[1], dtype=dtype)
assert ivy.all(x.data == ivy.array(data[0]))
# dtype
@handle_frontend_test(fn_tree="numpy.add", dtype_x_mask=_array_mask()) # dummy fn_tree
def test_numpy_dtype(dtype_x_mask):
dtype, data = dtype_x_mask
x = MaskedArray(data[0], mask=data[1], dtype=dtype)
assert x.dtype == dtype
# fill_value
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
dtype_x_mask=_array_mask(),
fill=st.integers(),
)
def test_numpy_fill_value(
dtype_x_mask,
fill,
):
dtype, data = dtype_x_mask
x = MaskedArray(data[0], mask=data[1], dtype=dtype, fill_value=fill)
assert x.fill_value == ivy.array(fill, dtype=dtype)
# hardmask
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
dtype_x_mask=_array_mask(),
hard=st.booleans(),
)
def test_numpy_hardmask(dtype_x_mask, hard):
dtype, data = dtype_x_mask
x = MaskedArray(data[0], mask=data[1], dtype=dtype, hard_mask=hard)
assert x.hardmask == hard
# mask
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
args=_array_mask(),
)
def test_numpy_mask(args):
dtype, data = args
x = MaskedArray(data[0], mask=ivy.array(data[1]), dtype=dtype, shrink=False)
assert ivy.all(x.mask == ivy.array(data[1]))
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_ma/test_MaskedArray.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_ma/test_MaskedArray.py",
"repo_id": "ivy",
"token_count": 911
} | 58 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# repeat
@handle_frontend_test(
fn_tree="numpy.repeat",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
min_dim_size=2,
),
repeats=helpers.ints(min_value=2, max_value=5),
axis=helpers.ints(min_value=-1, max_value=1),
test_with_out=st.just(False),
)
def test_numpy_repeat(
*,
dtype_and_x,
repeats,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
repeats=repeats,
axis=axis,
)
# tile
@handle_frontend_test(
fn_tree="numpy.tile",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
dtype_and_repeats=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape").map(
lambda rep: (len(rep),)
),
min_value=0,
max_value=10,
),
test_with_out=st.just(False),
)
def test_numpy_tile(
*,
dtype_and_x,
dtype_and_repeats,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
repeats_dtype, repeats = dtype_and_repeats
helpers.test_frontend_function(
input_dtypes=input_dtype + repeats_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
A=x[0],
reps=repeats[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_tiling_arrays.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_tiling_arrays.py",
"repo_id": "ivy",
"token_count": 1018
} | 59 |
# global
from hypothesis import assume, strategies as st
import numpy as np
import sys
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_method, handle_frontend_test
import ivy.functional.frontends.numpy as ivy_np
CLASS_TREE = "ivy.functional.frontends.numpy.matrix"
# --- Helpers --- #
# --------------- #
def _get_x_matrix(x, to_str):
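    """Unwrap the drawn matrix, optionally converting it to its string form."""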
if to_str:
x = _to_string_matrix(x[0])
else:
x = x[0]
return x
@st.composite
def _property_helper(draw):
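    """Draw a 2-D numeric array and build matching ivy and NumPy matrices from it."""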
_, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-1e04,
max_value=1e04,
min_num_dims=2,
max_num_dims=2,
)
)
    to_str = draw(st.booleans())
    x = _get_x_matrix(x, to_str)
data = ivy_np.matrix(x)
data_gt = np.matrix(x)
return data, data_gt
def _to_string_matrix(num_matrix):
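    """Render a numeric matrix as a string of the form "1 2; 3 4"."""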
str_matrix = ""
for i, row in enumerate(num_matrix):
for j, elem in enumerate(row):
str_matrix += str(elem)
if j < num_matrix.shape[1] - 1:
str_matrix += " "
elif i < num_matrix.shape[0] - 1:
str_matrix += "; "
return str_matrix
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_A(matrices, backend_fw):
data, data_gt = matrices
ret = ivy_np.ravel(data.A)
ret_gt = np.ravel(data_gt.A)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend="numpy",
)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_A1(matrices, backend_fw):
data, data_gt = matrices
helpers.value_test(
ret_np_flat=data.A1,
ret_np_from_gt_flat=data_gt.A1,
backend=backend_fw,
ground_truth_backend="numpy",
)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_I(matrices, backend_fw):
data, data_gt = matrices
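    # only test the inverse property on well-conditioned, square matrices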
assume(
np.linalg.cond(data.A.data) < 1 / sys.float_info.epsilon
and data.shape[0] == data.shape[1]
)
ret = ivy_np.ravel(data.I)
ret_gt = np.ravel(data_gt.I)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend="numpy",
)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_T(matrices, backend_fw):
data, data_gt = matrices
ret = ivy_np.ravel(data.T)
ret_gt = np.ravel(data_gt.T)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend="numpy",
)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_data(matrices):
data, data_gt = matrices
# sanity test
ivy.utils.assertions.check_equal(
type(data.data), type(data_gt.data), as_array=False
)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_dtype(matrices):
data, data_gt = matrices
ivy.utils.assertions.check_equal(
str(data.dtype), str(data_gt.dtype), as_array=False
)
# any
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.matrix",
method_name="any",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
valid_axis=True,
force_int_axis=True,
allow_neg_axes=False,
),
to_str=st.booleans(),
)
def test_numpy_matrix_any(
dtype_x_axis,
to_str,
init_flags,
method_flags,
backend_fw,
frontend_method_data,
frontend,
on_device,
):
input_dtype, x, axis = dtype_x_axis
x = _get_x_matrix(x, to_str)
if isinstance(axis, tuple):
axis = axis[0]
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"data": x,
"dtype": input_dtype[0],
},
method_input_dtypes=[],
method_all_as_kwargs_np={
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
# argmax
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.matrix",
method_name="argmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
valid_axis=True,
force_int_axis=True,
allow_neg_axes=False,
),
to_str=st.booleans(),
)
def test_numpy_matrix_argmax(
dtype_x_axis,
to_str,
init_flags,
method_flags,
backend_fw,
frontend_method_data,
frontend,
on_device,
):
input_dtype, x, axis = dtype_x_axis
x = _get_x_matrix(x, to_str)
if isinstance(axis, tuple):
axis = axis[0]
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"data": x,
"dtype": input_dtype[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_ndim(matrices):
data, data_gt = matrices
ivy.utils.assertions.check_equal(data.ndim, data_gt.ndim, as_array=False)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_shape(matrices):
data, data_gt = matrices
ivy.utils.assertions.check_equal(data.shape, data_gt.shape, as_array=False)
@handle_frontend_test(
fn_tree="numpy.add", # dummy fn_tree
matrices=_property_helper(),
)
def test_numpy_size(matrices):
data, data_gt = matrices
ivy.utils.assertions.check_equal(data.size, data_gt.size, as_array=False)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_matrix/test_methods.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_matrix/test_methods.py",
"repo_id": "ivy",
"token_count": 3277
} | 60 |
# global
from hypothesis import strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _generate_prelu_arrays(draw):
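    """Draw a float dtype, a 1-D input array, and a single-element weight."""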
arr_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", index=1, full=False))
input = draw(
helpers.array_values(
            dtype=dtype[0], shape=(arr_size,), min_value=0, max_value=10
)
)
weight = draw(
helpers.array_values(dtype=dtype[0], shape=(1,), min_value=0, max_value=1.0)
)
    return dtype, (input, weight)
# --- Main --- #
# ------------ #
# celu
@handle_frontend_test(
fn_tree="paddle.nn.functional.celu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
alpha=helpers.floats(min_value=0.1, max_value=1.0),
)
def test_paddle_celu(
*,
dtype_and_x,
alpha,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
alpha=alpha,
)
# elu
@handle_frontend_test(
fn_tree="paddle.nn.functional.elu",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
alpha=helpers.floats(min_value=0, max_value=1, exclude_min=True),
)
def test_paddle_elu(
*,
dtype_and_input,
alpha,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
alpha=alpha,
)
# gelu
@handle_frontend_test(
fn_tree="paddle.nn.functional.gelu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
small_abs_safety_factor=20,
),
approximate=st.booleans(),
)
def test_paddle_gelu(
*,
dtype_and_x,
approximate,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
approximate=approximate,
)
# glu
@handle_frontend_test(
fn_tree="paddle.nn.functional.glu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
min_value=-2,
min_num_dims=1,
min_dim_size=4,
max_dim_size=4,
),
axis=helpers.ints(min_value=-1, max_value=0),
test_with_out=st.just(False),
)
def test_paddle_glu(
*,
dtype_and_x,
axis,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-01,
x=x[0],
axis=axis,
)
# gumbel_softmax
@handle_frontend_test(
fn_tree="paddle.nn.functional.gumbel_softmax",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_axes_size=1,
force_int_axis=True,
valid_axis=True,
min_value=-30.0,
max_value=30.0,
),
dtypes=helpers.get_dtypes("float", none=False, full=False),
temperature=st.floats(min_value=1e-3, max_value=10),
hard=st.booleans(),
)
def test_paddle_gumbel_softmax(
*,
dtype_x_and_axis,
dtypes,
temperature,
hard,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x, axis = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=ivy.as_ivy_dtype(dtypes[0]),
temperature=temperature,
hard=hard,
)
# hardshrink
@handle_frontend_test(
fn_tree="paddle.nn.functional.hardshrink",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
threshold=helpers.floats(min_value=0, max_value=1, exclude_min=True),
)
def test_paddle_hardshrink(
*,
dtype_and_x,
threshold,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
threshold=threshold,
)
# hardsigmoid
@handle_frontend_test(
fn_tree="paddle.nn.functional.hardsigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
slope=helpers.ints(min_value=0, max_value=10),
offset=helpers.ints(min_value=0, max_value=10),
)
def test_paddle_hardsigmoid(
*,
dtype_and_x,
slope,
offset,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
slope=slope,
offset=offset,
)
# hardswish
@handle_frontend_test(
fn_tree="paddle.nn.functional.hardswish",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
safety_factor_scale="log",
),
)
def test_paddle_hardswish(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# hardtanh
@handle_frontend_test(
fn_tree="paddle.nn.functional.hardtanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
max_val=helpers.floats(min_value=0, max_value=1, exclude_min=True),
)
def test_paddle_hardtanh(
*,
dtype_and_x,
max_val,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
    helpers.test_frontend_function(
        input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        frontend=frontend,
        test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
        x=x[0],
        min=-max_val,
        max=max_val,
    )
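# leaky_relu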
@handle_frontend_test(
fn_tree="paddle.nn.functional.leaky_relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_leaky_relu(
*,
dtype_and_x,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
negative_slope=0.01,
x=x[0],
)
# log_sigmoid
@handle_frontend_test(
fn_tree="paddle.nn.functional.log_sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=3,
small_abs_safety_factor=3,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_paddle_log_sigmoid(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log_softmax
@handle_frontend_test(
fn_tree="paddle.nn.functional.log_softmax",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_axes_size=1,
force_int_axis=True,
valid_axis=True,
min_value=-30.0,
max_value=30.0,
),
dtypes=helpers.get_dtypes("float", none=False, full=False),
)
def test_paddle_log_softmax(
*,
dtype_x_and_axis,
dtypes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
axis=axis,
dtype=ivy.as_ivy_dtype(dtypes[0]),
)
# mish
@handle_frontend_test(
fn_tree="paddle.nn.functional.mish",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
small_abs_safety_factor=20,
),
)
def test_paddle_mish(
*,
dtype_and_input,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# prelu
@handle_frontend_test(
fn_tree="paddle.nn.functional.prelu",
dtype_input_and_weight=_generate_prelu_arrays(),
)
def test_paddle_prelu(
*,
dtype_input_and_weight,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_input_and_weight
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
weight=x[1],
)
# relu
@handle_frontend_test(
fn_tree="paddle.nn.functional.relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_relu(
dtype_and_x,
frontend,
test_flags,
backend_fw,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
)
# relu6
@handle_frontend_test(
fn_tree="paddle.nn.functional.relu6",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_relu6(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# relu_
@handle_frontend_test(
fn_tree="paddle.nn.functional.relu_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_relu_(
dtype_and_x,
frontend,
test_flags,
backend_fw,
fn_tree,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
x=x[0],
)
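# rrelu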
@handle_frontend_test(
fn_tree="paddle.nn.functional.rrelu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_rrelu(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
)
# selu
@handle_frontend_test(
fn_tree="paddle.nn.functional.selu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
small_abs_safety_factor=20,
),
scale=helpers.ints(min_value=2, max_value=10),
alpha=helpers.ints(min_value=1, max_value=10),
)
def test_paddle_selu(
*,
dtype_and_x,
scale,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
alpha=alpha,
scale=scale,
)
# silu
@handle_frontend_test(
fn_tree="paddle.nn.functional.silu",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_silu(
*,
dtype_and_input,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# softmax_
@handle_frontend_test(
fn_tree="paddle.nn.functional.softmax_",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_axes_size=1,
force_int_axis=True,
valid_axis=True,
min_value=-30.0,
max_value=30.0,
),
dtypes=helpers.get_dtypes("float", none=False, full=False),
)
def test_paddle_softmax_(
*,
dtype_x_and_axis,
dtypes,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x, axis = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
dtype=ivy.as_ivy_dtype(dtypes[0]),
)
# softplus
@handle_frontend_test(
fn_tree="paddle.nn.functional.softplus",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
beta=st.floats(min_value=1e-3, max_value=10), # strategy for the beta argument
threshold=st.floats(
min_value=1e-3, max_value=10
), # strategy for the threshold argument
)
def test_paddle_softplus(
*,
dtype_and_input,
beta,
threshold,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
beta=beta,
threshold=threshold,
)
# softshrink
@handle_frontend_test(
fn_tree="paddle.nn.functional.softshrink",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
threshold=helpers.floats(min_value=0, max_value=1, exclude_min=True),
)
def test_paddle_softshrink(
*,
dtype_and_input,
threshold,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
threshold=threshold,
)
# softsign
@handle_frontend_test(
fn_tree="paddle.nn.functional.softsign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
small_abs_safety_factor=20,
),
)
def test_paddle_softsign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# swish
@handle_frontend_test(
fn_tree="paddle.nn.functional.swish",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_swish(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# tanh_
@handle_frontend_test(
fn_tree="paddle.nn.functional.tanh_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_tanh_(
*,
dtype_and_x,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# tanhshrink
@handle_frontend_test(
fn_tree="paddle.nn.functional.tanhshrink",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_tanhshrink(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=False,
x=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_activation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_activation.py",
"repo_id": "ivy",
"token_count": 10080
} | 61 |
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from hypothesis import strategies as st
# --- Helpers --- #
# --------------- #
@st.composite
def _get_clip_inputs_(draw):
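    """Draw a dtype and an array with values in [0, 50] for the in-place clip test."""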
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape,
min_value=0,
max_value=50,
)
)
return x_dtype, x
# --- Main --- #
# ------------ #
# add_
@handle_frontend_test(
fn_tree="paddle.tensor.math.add_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_add_(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# ceil_
@handle_frontend_test(
fn_tree="paddle.tensor.math.ceil_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_ceil_(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# clip_
@handle_frontend_test(
fn_tree="paddle.tensor.math.clip_",
input_and_ranges=_get_clip_inputs_(),
min=st.integers(min_value=0, max_value=5),
max=st.integers(min_value=5, max_value=10),
)
def test_paddle_clip_(
*,
input_and_ranges,
min,
max,
frontend,
fn_tree,
test_flags,
backend_fw,
on_device,
):
input_dtype, x = input_and_ranges
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
min=min,
max=max,
)
# exp_
@handle_frontend_test(
fn_tree="paddle.tensor.math.exp_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_exp_(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# floor_
@handle_frontend_test(
fn_tree="paddle.tensor.math.floor_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_floor_(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# lerp_
@handle_frontend_test(
fn_tree="paddle.tensor.math.lerp_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_lerp_(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
weight=x[2],
)
# reciprocal_
@handle_frontend_test(
fn_tree="paddle.tensor.math.reciprocal_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_reciprocal_(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# remainder_
@handle_frontend_test(
fn_tree="paddle.tensor.math.remainder_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_remainder_(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# round_
@handle_frontend_test(
fn_tree="paddle.tensor.math.round_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
),
)
def test_paddle_round_(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# rsqrt_
@handle_frontend_test(
fn_tree="paddle.tensor.math.rsqrt_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_rsqrt_(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sqrt_
@handle_frontend_test(
fn_tree="paddle.tensor.math.sqrt_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_sqrt_(
*,
dtype_and_x,
fn_tree,
frontend,
test_flags,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# subtract_
@handle_frontend_test(
fn_tree="paddle.tensor.math.subtract_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_subtract_(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
on_device=on_device,
x=x[0],
y=x[1],
)
# tanh_
@handle_frontend_test(
fn_tree="paddle.tensor.math.tanh_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_tanh_(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-2,
x=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_math.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_math.py",
"repo_id": "ivy",
"token_count": 4762
} | 62 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "tensorflow"
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/conftest.py",
"repo_id": "ivy",
"token_count": 32
} | 63 |
# global
import sys
import numpy as np
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import _matrix_rank_helper
from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import (
matrix_is_stable,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _generate_chain_matmul_dtype_and_arrays(draw):
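    """Draw three chain-multiplicable matrices with shapes (a, b), (b, c), (c, d)."""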
dtype = draw(helpers.get_dtypes("float", full=True))
input_dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
matrices_dims = draw(
st.lists(st.integers(min_value=2, max_value=10), min_size=4, max_size=4)
)
shape_1 = (matrices_dims[0], matrices_dims[1])
shape_2 = (matrices_dims[1], matrices_dims[2])
shape_3 = (matrices_dims[2], matrices_dims[3])
matrix_1 = draw(
helpers.dtype_and_values(
shape=shape_1,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
matrix_2 = draw(
helpers.dtype_and_values(
shape=shape_2,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
matrix_3 = draw(
helpers.dtype_and_values(
shape=shape_3,
dtype=input_dtype,
min_value=-10,
max_value=10,
)
)
return input_dtype, [matrix_1[1][0], matrix_2[1][0], matrix_3[1][0]]
@st.composite
def _get_dtype_and_3dbatch_matrices(draw, with_input=False, input_3d=False):
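    """Draw two batched 3-D matrices with a shared inner dimension, optionally
    with an input term (2-D, or 3-D when ``input_3d`` is True)."""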
dim_size1 = draw(helpers.ints(min_value=2, max_value=5))
dim_size2 = draw(helpers.ints(min_value=2, max_value=5))
shared_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", full=True))
dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
batch_size = draw(helpers.ints(min_value=2, max_value=4))
mat1 = draw(
helpers.array_values(
dtype=dtype[0],
shape=(batch_size, dim_size1, shared_size),
min_value=2,
max_value=5,
)
)
mat2 = draw(
helpers.array_values(
dtype=dtype[0],
shape=(batch_size, shared_size, dim_size2),
min_value=2,
max_value=5,
)
)
if with_input:
if input_3d:
input = draw(
helpers.array_values(
dtype=dtype[0],
shape=(batch_size, dim_size1, dim_size2),
min_value=2,
max_value=5,
)
)
return dtype, input, mat1, mat2
input = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size1, dim_size2), min_value=2, max_value=5
)
)
return dtype, input, mat1, mat2
return dtype, mat1, mat2
@st.composite
def _get_dtype_and_matrices(draw):
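    """Draw two equally shaped 2-D matrices of a shared float dtype."""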
dim1 = draw(helpers.ints(min_value=2, max_value=7))
dim2 = draw(helpers.ints(min_value=2, max_value=7))
dtype = draw(helpers.get_dtypes("float", full=False))
matr1 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim1, dim2), min_value=2, max_value=10
)
)
matr2 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim1, dim2), min_value=2, max_value=10
)
)
return dtype, matr1, matr2
# helpers
@st.composite
def _get_dtype_and_square_matrix(draw):
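    """Draw a float dtype and a random square matrix."""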
dim_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", full=True))
dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
mat = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size, dim_size), min_value=0, max_value=10
)
)
return dtype, mat
@st.composite
def _get_dtype_input_and_mat_vec(draw, *, with_input=False):
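    """Draw a matrix and a compatible vector, plus an optional input vector."""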
dim_size = draw(helpers.ints(min_value=2, max_value=5))
shared_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", full=True))
dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
mat = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size, shared_size), min_value=2, max_value=5
)
)
vec = draw(
helpers.array_values(
dtype=dtype[0], shape=(shared_size,), min_value=2, max_value=5
)
)
if with_input:
input = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5
)
)
return dtype, input, mat, vec
return dtype, mat, vec
@st.composite
def _get_dtype_input_and_matrices(draw, with_input=False):
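    """Draw two multiplicable 2-D matrices, plus an optional input matrix."""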
dim_size1 = draw(helpers.ints(min_value=2, max_value=5))
dim_size2 = draw(helpers.ints(min_value=2, max_value=5))
shared_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", full=True))
dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
mat1 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size1, shared_size), min_value=2, max_value=5
)
)
mat2 = draw(
helpers.array_values(
dtype=dtype[0], shape=(shared_size, dim_size2), min_value=2, max_value=5
)
)
if with_input:
input = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size1, dim_size2), min_value=2, max_value=5
)
)
return dtype, input, mat1, mat2
return dtype, mat1, mat2
@st.composite
def _get_dtype_input_and_vectors(draw, with_input=False, same_size=False):
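    """Draw two 1-D vectors (equal-sized if ``same_size``), plus an optional
    input matrix."""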
dim_size1 = draw(helpers.ints(min_value=2, max_value=5))
dim_size2 = dim_size1 if same_size else draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", full=True))
dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
vec1 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size1,), min_value=2, max_value=5
)
)
vec2 = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size2,), min_value=2, max_value=5
)
)
if with_input:
input = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size1, dim_size2), min_value=2, max_value=5
)
)
return dtype, input, vec1, vec2
return dtype, vec1, vec2
# --- Main --- #
# ------------ #
# addbmm
@handle_frontend_test(
fn_tree="torch.addbmm",
dtype_and_matrices=_get_dtype_and_3dbatch_matrices(with_input=True),
beta=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
alpha=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
)
def test_torch_addbmm(
dtype_and_matrices,
beta,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, mat1, mat2 = dtype_and_matrices
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
input=input,
batch1=mat1,
batch2=mat2,
beta=beta,
alpha=alpha,
)
# addmm
@handle_frontend_test(
fn_tree="torch.addmm",
dtype_and_matrices=_get_dtype_input_and_matrices(with_input=True),
beta=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
alpha=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
)
def test_torch_addmm(
dtype_and_matrices,
beta,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, mat1, mat2 = dtype_and_matrices
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
input=input,
mat1=mat1,
mat2=mat2,
beta=beta,
alpha=alpha,
)
# addmv
@handle_frontend_test(
fn_tree="torch.addmv",
dtype_and_matrices=_get_dtype_input_and_mat_vec(with_input=True),
beta=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
alpha=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
)
def test_torch_addmv(
dtype_and_matrices,
beta,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, mat, vec = dtype_and_matrices
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=input,
mat=mat,
vec=vec,
beta=beta,
alpha=alpha,
)
# addr
@handle_frontend_test(
fn_tree="torch.addr",
dtype_and_vecs=_get_dtype_input_and_vectors(with_input=True),
beta=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
alpha=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
)
def test_torch_addr(
dtype_and_vecs,
beta,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, vec1, vec2 = dtype_and_vecs
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
input=input,
vec1=vec1,
vec2=vec2,
beta=beta,
alpha=alpha,
)
# baddbmm
@handle_frontend_test(
fn_tree="torch.baddbmm",
dtype_and_matrices=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True),
beta=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
alpha=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
)
def test_torch_baddbmm(
dtype_and_matrices,
beta,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, batch1, batch2 = dtype_and_matrices
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
input=input,
batch1=batch1,
batch2=batch2,
beta=beta,
alpha=alpha,
)
# bmm
@handle_frontend_test(
fn_tree="torch.bmm",
dtype_and_matrices=_get_dtype_and_3dbatch_matrices(),
)
def test_torch_bmm(
dtype_and_matrices,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, mat1, mat2 = dtype_and_matrices
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
input=mat1,
mat2=mat2,
)
# chain_matmul
@handle_frontend_test(
fn_tree="torch.chain_matmul",
dtype_and_matrices=_generate_chain_matmul_dtype_and_arrays(),
)
def test_torch_chain_matmul(
*,
dtype_and_matrices,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, matrices = dtype_and_matrices
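    # chain_matmul is variadic, so pass each matrix under a distinct kwarg name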
args = {f"x{i}": matrix for i, matrix in enumerate(matrices)}
test_flags.num_positional_args = len(matrices)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
**args,
)
# cholesky
@handle_frontend_test(
fn_tree="torch.cholesky",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", index=1),
min_value=0,
max_value=10,
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
).filter(
lambda x: np.linalg.cond(x[1]) < 1 / sys.float_info.epsilon
and np.linalg.det(np.asarray(x[1])) != 0
),
upper=st.booleans(),
)
def test_torch_cholesky(
dtype_and_x,
upper,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
x = x[0]
x = (
np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
) # make symmetric positive-definite
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
input=x,
upper=upper,
)
# dot
@handle_frontend_test(
fn_tree="torch.dot",
dtype_and_vecs=_get_dtype_input_and_vectors(same_size=True),
)
def test_torch_dot(
dtype_and_vecs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vec1, vec2 = dtype_and_vecs
test_flags.num_positional_args = len(dtype_and_vecs) - 1
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=vec1,
other=vec2,
)
# ger
@handle_frontend_test(
fn_tree="torch.ger",
dtype_and_vecs=_get_dtype_input_and_vectors(),
)
def test_torch_ger(
dtype_and_vecs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vec1, vec2 = dtype_and_vecs
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=vec1,
vec2=vec2,
)
# inner
@handle_frontend_test(
fn_tree="torch.inner",
dtype_and_matrices=_get_dtype_input_and_matrices(with_input=True),
)
def test_torch_inner(
dtype_and_matrices,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input_mat, mat1, mat2 = dtype_and_matrices
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input_mat,
other=mat2,
out=None,
)
# logdet
@handle_frontend_test(
fn_tree="torch.logdet",
dtype_and_x=_get_dtype_and_square_matrix(),
)
def test_torch_logdet(
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
)
# matmul
@handle_frontend_test(
fn_tree="torch.matmul",
dtype_xy=_get_dtype_and_3dbatch_matrices(),
)
def test_torch_matmul(
dtype_xy,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, y = dtype_xy
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
input=x,
other=y,
out=None,
)
# matrix_rank
@handle_frontend_test(
fn_tree="torch.linalg.matrix_rank",
    # aliases=["torch.matrix_rank"]; deprecated since torch 1.9. Uncomment with
    # the multi-version testing pipeline.
dtype_x_hermitian_atol_rtol=_matrix_rank_helper(),
)
def test_torch_matrix_rank(
dtype_x_hermitian_atol_rtol,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, hermitian, atol, rtol = dtype_x_hermitian_atol_rtol
assume(matrix_is_stable(x, cond_limit=10))
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
A=x,
atol=atol,
rtol=rtol,
hermitian=hermitian,
)
# mm
@handle_frontend_test(
fn_tree="torch.mm",
dtype_xy=_get_dtype_input_and_matrices(),
)
def test_torch_mm(
dtype_xy,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, y = dtype_xy
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-02,
rtol=1e-02,
input=x,
mat2=y,
)
# mv
@handle_frontend_test(
fn_tree="torch.mv",
dtype_mat_vec=_get_dtype_input_and_mat_vec(),
)
def test_torch_mv(
dtype_mat_vec,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, mat, vec = dtype_mat_vec
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=mat,
vec=vec,
out=None,
)
# outer
@handle_frontend_test(
fn_tree="torch.outer",
dtype_and_vecs=_get_dtype_input_and_vectors(),
)
def test_torch_outer(
dtype_and_vecs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vec1, vec2 = dtype_and_vecs
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=vec1,
vec2=vec2,
)
# pinverse
@handle_frontend_test(
fn_tree="torch.pinverse",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", index=1),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
),
rtol=st.floats(1e-5, 1e-3),
)
def test_torch_pinverse(
dtype_and_x,
rtol,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=x[0],
rcond=rtol,
)
# qr
@handle_frontend_test(
fn_tree="torch.qr",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", index=1),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=2,
max_value=5,
),
some=st.booleans(),
)
def test_torch_qr(
dtype_and_x,
some,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
input=x[0],
some=some,
)
# svd
@handle_frontend_test(
fn_tree="torch.svd",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", index=1),
min_num_dims=3,
max_num_dims=5,
min_dim_size=2,
max_dim_size=5,
),
some=st.booleans(),
compute=st.booleans(),
)
def test_torch_svd(
dtype_and_x,
some,
compute,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
some=some,
compute_uv=compute,
)
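# trapezoid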
@handle_frontend_test(
fn_tree="torch.trapezoid",
test_with_out=st.just(False),
dtype_y_x=_get_dtype_and_matrices(),
use_x=st.booleans(),
dim=st.integers(min_value=0, max_value=1),
    dx=st.floats(allow_nan=False, allow_infinity=False),
)
def test_torch_trapezoid(
dtype_y_x,
use_x,
dim,
dx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, y, x = dtype_y_x
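    # torch.trapezoid accepts either sample points (x) or a scalar spacing (dx),
    # not both, so only one of the two is passed per draw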
if use_x:
test_flags.num_positional_args = 2
kwargs = {"y": y, "x": x, "dim": -1}
else:
test_flags.num_positional_args = 1
kwargs = {"y": y, "dx": dx, "dim": dim}
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**kwargs,
)
# vdot
@handle_frontend_test(
fn_tree="torch.vdot",
dtype_and_vecs=_get_dtype_input_and_vectors(same_size=True),
)
def test_torch_vdot(
dtype_and_vecs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vec1, vec2 = dtype_and_vecs
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=vec1,
other=vec2,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py",
"repo_id": "ivy",
"token_count": 12053
} | 64 |
# global
import numpy as np
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import ivy
from ivy.functional.frontends.torch.nn.functional.loss_functions import (
cosine_similarity,
)
# binary_cross_entropy
@handle_frontend_test(
fn_tree="torch.nn.functional.binary_cross_entropy",
dtype_and_vals=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
shared_dtype=True,
min_value=1.0013580322265625e-05,
max_value=1.0,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
shape=(5,),
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["mean", "none", "sum", None]),
)
def test_torch_binary_cross_entropy(
*,
dtype_and_vals,
size_average,
reduce,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_vals
pred_dtype, pred = input_dtype[0], x[0]
true_dtype, true = input_dtype[1], x[1]
weight_dtype, weight = input_dtype[2], x[2]
helpers.test_frontend_function(
input_dtypes=[pred_dtype, true_dtype, weight_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=pred,
target=true,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
rtol=1e-02,
atol=1e-02,
)
# binary_cross_entropy_with_logits
@handle_frontend_test(
fn_tree="torch.nn.functional.binary_cross_entropy_with_logits",
dtype_and_true=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.0,
max_value=1.0,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
dtype_and_pred=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1.0013580322265625e-05,
max_value=1.0,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
dtype_and_weight=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1.0013580322265625e-05,
max_value=1.0,
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["mean", "none", "sum", None]),
dtype_and_pos_weight=st.one_of(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=10,
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
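        # placeholder mimicking the (dtype, values) structure for the case
        # where no pos_weight is drawn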
st.just([[None], [None]]),
),
)
def test_torch_binary_cross_entropy_with_logits(
*,
dtype_and_true,
dtype_and_pred,
dtype_and_weight,
size_average,
reduce,
reduction,
dtype_and_pos_weight,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
pred_dtype, pred = dtype_and_pred
true_dtype, true = dtype_and_true
weight_dtype, weight = dtype_and_weight
pos_weight_dtype, pos_weight = dtype_and_pos_weight
helpers.test_frontend_function(
input_dtypes=[
pred_dtype[0],
true_dtype[0],
weight_dtype[0],
pos_weight_dtype[0],
],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=pred[0],
target=true[0],
weight=weight[0],
size_average=size_average,
reduce=reduce,
reduction=reduction,
pos_weight=pos_weight[0],
)
# cosine_embedding_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.cosine_embedding_loss",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=2,
max_value=5,
min_num_dims=1,
max_num_dims=2,
min_dim_size=2,
shared_dtype=True,
num_arrays=2,
),
margin=st.floats(
min_value=-1.0,
max_value=1.0,
width=16,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_cosine_embedding_loss(
*,
dtype_and_inputs,
margin,
size_average,
reduce,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_inputs
input1_dtype, input1 = input_dtype[0], x[0]
input2_dtype, input2 = input_dtype[1], x[1]
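    # cosine_embedding_loss expects targets of 1 or -1; for 1-D inputs a
    # scalar target suffices, otherwise build a shuffled mix of ones,
    # minus ones and random values with the test backend set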
ivy.set_backend(backend_fw)
if input1.ndim == input2.ndim == 1:
tar = ivy.array(1.0)
else:
third = input1.shape[0] // 3
ones = ivy.ones(input1.shape[0] - (third * 2))
minus_ones = ivy.ones(third) * -1
randoms = ivy.random_uniform(shape=[third])
tar = ivy.hstack((ones, minus_ones, randoms)).shuffle()
helpers.test_frontend_function(
input_dtypes=[input1_dtype, input2_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input1=input1,
input2=input2,
target=tar,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
ivy.previous_backend()
# cross_entropy
@handle_frontend_test(
fn_tree="torch.nn.functional.cross_entropy",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
allow_inf=False,
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
),
dtype_and_target=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.0,
max_value=1.0,
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
dtype_and_weights=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["mean", "none", "sum"]),
label_smoothing=helpers.floats(min_value=0, max_value=0.49),
)
def test_torch_cross_entropy(
*,
dtype_and_input,
dtype_and_target,
dtype_and_weights,
size_average,
reduce,
reduction,
label_smoothing,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
inputs_dtype, input = dtype_and_input
target_dtype, target = dtype_and_target
weights_dtype, weights = dtype_and_weights
helpers.test_frontend_function(
input_dtypes=inputs_dtype + target_dtype + weights_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
target=target[0],
weight=weights[0],
size_average=size_average,
reduce=reduce,
reduction=reduction,
label_smoothing=label_smoothing,
)
# gaussian_nll_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.gaussian_nll_loss",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=3,
min_value=0.01,
max_value=1.0,
allow_inf=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
full=st.booleans(),
eps=st.floats(
min_value=0.0,
max_value=1.0,
allow_nan=False,
allow_infinity=False,
),
reduction=st.sampled_from(["mean", "sum"]),
)
def test_torch_gaussian_nll_loss(
*,
dtype_and_input,
full,
eps,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
inputs_dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=inputs_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
target=input[1],
var=input[2],
full=full,
eps=eps,
reduction=reduction,
atol=1e-2,
rtol=1e-2,
)
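# hinge_embedding_loss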
@handle_frontend_test(
fn_tree="torch.nn.functional.hinge_embedding_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-100,
max_value=100,
allow_inf=False,
),
margin=st.floats(min_value=-10, max_value=10),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_hinge_embedding_loss(
*,
dtype_and_x,
margin,
size_average,
reduce,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_x
input, target = x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
target=target,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
atol=1e-5,
rtol=1e-5,
)
# huber_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.huber_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
delta=helpers.floats(min_value=0, max_value=5),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_huber_loss(
*,
dtype_and_x,
delta,
reduction,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
pred_dtype, pred = input_dtype[0], x[0]
true_dtype, true = input_dtype[1], x[1]
helpers.test_frontend_function(
input_dtypes=[pred_dtype, true_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=pred,
target=true,
reduction=reduction,
delta=delta,
)
# kl_div
@handle_frontend_test(
fn_tree="torch.nn.functional.kl_div",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=1e-04,
max_value=1,
),
dtype_and_target=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=1e-04,
max_value=1,
),
reduction=st.sampled_from(["none", "sum", "batchmean", "mean"]),
log_target=st.booleans(),
size_average=st.one_of(st.just(None), st.booleans()),
reduce=st.one_of(st.just(None), st.booleans()),
)
def test_torch_kl_div(
*,
dtype_and_input,
dtype_and_target,
size_average,
reduce,
reduction,
log_target,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, input = dtype_and_input
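    # torch's kl_div expects the input (and the target when log_target is
    # set) in log-space, so map the drawn probabilities through log first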
input[0] = np.array(np.log(input[0]))
target_dtype, target = dtype_and_target
if log_target:
target[0] = np.array(np.log(target[0]))
helpers.test_frontend_function(
input_dtypes=input_dtype + target_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
target=target[0],
size_average=size_average,
reduce=reduce,
reduction=reduction,
log_target=log_target,
)
# l1_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.l1_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_l1_loss(
*,
dtype_and_x,
size_average,
reduce,
reduction,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
pred_dtype, pred = input_dtype[0], x[0]
true_dtype, true = input_dtype[1], x[1]
helpers.test_frontend_function(
input_dtypes=[pred_dtype, true_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=pred,
target=true,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
# margin ranking loss
@handle_frontend_test(
fn_tree="torch.nn.functional.margin_ranking_loss",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
allow_inf=False,
shared_dtype=True,
),
margin=st.floats(),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_margin_ranking_loss(
*,
dtype_and_inputs,
margin,
size_average,
reduce,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_inputs
input1_dtype, input1 = input_dtype[0], x[0]
input2_dtype, input2 = input_dtype[1], x[1]
tar_dtype, tar = input_dtype[2], x[2]
helpers.test_frontend_function(
input_dtypes=[input1_dtype, input2_dtype, tar_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input1=input1,
input2=input2,
target=tar,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
# mse_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.mse_loss",
dtype_and_true=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.0,
max_value=1.0,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
dtype_and_pred=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.0,
max_value=1.0,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
allow_inf=False,
exclude_min=True,
exclude_max=True,
min_num_dims=1,
max_num_dims=1,
min_dim_size=2,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["mean"]),
test_with_out=st.just(False),
)
def test_torch_mse_loss(
*,
dtype_and_true,
dtype_and_pred,
size_average,
reduce,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
pred_dtype, pred = dtype_and_pred
true_dtype, true = dtype_and_true
helpers.test_frontend_function(
input_dtypes=[pred_dtype[0], true_dtype[0]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
input=pred[0],
target=true[0],
size_average=size_average,
reduce=reduce,
reduction=reduction,
on_device=on_device,
)
# multilabel_margin_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.multilabel_margin_loss",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
min_num_dims=1,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_multilabel_margin_loss(
*,
dtype_and_inputs,
reduction,
size_average,
reduce,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_inputs
helpers.test_frontend_function(
backend_to_test=backend_fw,
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
target=x[1],
reduction=reduction,
size_average=size_average,
reduce=reduce,
)
# multilabel soft margin loss
@handle_frontend_test(
fn_tree="torch.nn.functional.multilabel_soft_margin_loss",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
min_num_dims=1,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_multilabel_soft_margin_loss(
*,
dtype_and_inputs,
size_average,
reduce,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_inputs
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
target=x[1],
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
# nll_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.nll_loss",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0.01,
max_value=1.0,
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=1,
),
dtype_and_target=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=0.0,
max_value=1.0,
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=1,
),
dtype_and_weights=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
allow_inf=False,
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=1,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["mean", "none", "sum"]),
)
def test_torch_nll_loss(
*,
dtype_and_input,
dtype_and_target,
dtype_and_weights,
size_average,
reduce,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
inputs_dtype, input = dtype_and_input
target_dtype, target = dtype_and_target
weights_dtype, weights = dtype_and_weights
helpers.test_frontend_function(
input_dtypes=inputs_dtype + target_dtype + weights_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
target=target[0],
weight=weights[0],
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
# poisson_nll_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.poisson_nll_loss",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=0.0,
max_value=1.0,
allow_inf=False,
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
),
log_input=st.booleans(),
full=st.booleans(),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["mean", "none", "sum"]),
)
def test_torch_poisson_nll_loss(
*,
dtype_and_input,
log_input,
full,
size_average,
reduce,
reduction,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
inputs_dtype, input = dtype_and_input
helpers.test_frontend_function(
input_dtypes=inputs_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
target=input[1],
log_input=log_input,
full=full,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
# smooth_l1_loss
@handle_frontend_test(
fn_tree="torch.nn.functional.smooth_l1_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
beta=st.sampled_from([1.0, 0.5, 0.1, 0.0]),
test_with_out=st.just(False),
)
def test_torch_smooth_l1_loss(
*,
dtype_and_x,
size_average,
reduce,
reduction,
beta,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
pred_dtype, pred = input_dtype[0], x[0]
true_dtype, true = input_dtype[1], x[1]
helpers.test_frontend_function(
input_dtypes=[pred_dtype, true_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=pred,
target=true,
size_average=size_average,
reduce=reduce,
reduction=reduction,
beta=beta,
)
# soft margin loss
@handle_frontend_test(
fn_tree="torch.nn.functional.soft_margin_loss",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_soft_margin_loss(
*,
dtype_and_x,
size_average,
reduce,
reduction,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
pred_dtype, pred = input_dtype[0], x[0]
tar_dtype, tar = input_dtype[1], x[1]
helpers.test_frontend_function(
input_dtypes=[pred_dtype, tar_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=pred,
target=tar,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
# triplet margin loss
@handle_frontend_test(
fn_tree="torch.nn.functional.triplet_margin_loss",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
allow_inf=False,
shared_dtype=True,
min_value=0.0,
max_value=1.0,
min_num_dims=1,
max_num_dims=2,
min_dim_size=1,
),
margin=st.floats(),
p=st.integers(min_value=0, max_value=2),
swap=st.booleans(),
size_average=st.booleans(),
reduce=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_triplet_margin_loss(
*,
dtype_and_inputs,
margin,
p,
swap,
size_average,
reduce,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_inputs
anchor_dtype, anchor = input_dtype[0], x[0]
positive_dtype, positive = input_dtype[1], x[1]
negative_dtype, negative = input_dtype[2], x[2]
helpers.test_frontend_function(
input_dtypes=[anchor_dtype, positive_dtype, negative_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
anchor=anchor,
positive=positive,
negative=negative,
margin=margin,
p=p,
swap=swap,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
# triplet margin distance loss
@handle_frontend_test(
fn_tree="torch.nn.functional.triplet_margin_with_distance_loss",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=3,
allow_inf=False,
shared_dtype=True,
min_value=0.0,
max_value=1.0,
min_num_dims=1,
max_num_dims=2,
min_dim_size=1,
),
distance_function=st.sampled_from([cosine_similarity, None]),
margin=st.floats(min_value=-10, max_value=10),
swap=st.booleans(),
reduction=st.sampled_from(["none", "mean", "sum"]),
test_with_out=st.just(False),
)
def test_torch_triplet_margin_with_distance_loss(
*,
dtype_and_inputs,
distance_function,
margin,
swap,
reduction,
test_flags,
fn_tree,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_inputs
anchor_dtype, anchor = input_dtype[0], x[0]
positive_dtype, positive = input_dtype[1], x[1]
negative_dtype, negative = input_dtype[2], x[2]
test_flags.num_positional_args = len(x)
helpers.test_frontend_function(
input_dtypes=[anchor_dtype, positive_dtype, negative_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
anchor=anchor,
positive=positive,
negative=negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_loss_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_loss_functions.py",
"repo_id": "ivy",
"token_count": 13844
} | 65 |
# global
import numpy as np
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _nms_helper(draw, batched=False):
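    # generate random (x1, y1, x2, y2) boxes inside the image; keying the
    # dict by the coordinates deduplicates boxes, each mapped to a score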
img_width = draw(st.integers(250, 1250))
img_height = draw(st.integers(250, 1250))
num_boxes = draw(st.integers(5, 50))
bbox = {}
for _ in range(num_boxes):
x1 = draw(st.integers(0, img_width - 20))
w = draw(st.integers(5, img_width - x1))
y1 = draw(st.integers(0, img_height - 20))
h = draw(st.integers(5, img_height - y1))
bbox[(x1, y1, x1 + w, y1 + h)] = draw(st.floats(0.1, 0.7))
iou_threshold = draw(st.floats(0.2, 0.5))
idxs = None
if batched:
bbox_len = len(bbox)
num_of_categories = draw(st.integers(1, max(bbox_len // 2, 2)))
idxs = np.arange(num_of_categories)
idxs = np.random.choice(idxs, size=bbox_len)
return (
["float32", "float32"],
np.array(list(bbox.keys()), dtype=np.float32),
np.array(list(bbox.values()), dtype=np.float32),
iou_threshold,
idxs,
)
@st.composite
def _roi_align_helper(draw):
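    # spatial_scale maps image coordinates onto the (H, W) feature map, and
    # every drawn box stores its batch index as the first of its 5 entries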
dtype = draw(helpers.get_dtypes("valid"))[0]
N = draw(st.integers(1, 5))
C = draw(st.integers(1, 5))
H = W = draw(st.integers(5, 20))
img_width = img_height = draw(st.integers(50, 100))
spatial_scale = H / img_height
output_size = draw(st.integers(H - 2, H + 5))
sampling_ratio = draw(st.one_of(st.just(-1), st.integers(1, 3)))
aligned = draw(st.booleans())
input = draw(
helpers.array_values(
dtype=dtype,
shape=(N, C, H, W),
min_value=-3,
max_value=3,
)
)
bbox = {}
for i in range(N):
num_boxes = draw(st.integers(1, 5))
for _ in range(num_boxes):
x1 = draw(st.integers(0, img_width - 20))
w = draw(st.integers(5, img_width - x1))
y1 = draw(st.integers(0, img_height - 20))
h = draw(st.integers(5, img_height - y1))
bbox[(i, x1, y1, x1 + w, y1 + h)] = 1
return (
[dtype],
input,
np.array(list(bbox.keys()), dtype=dtype).reshape((-1, 5)),
output_size,
spatial_scale,
sampling_ratio,
aligned,
)
# --- Main --- #
# ------------ #
# batched_nms
@handle_frontend_test(
fn_tree="torchvision.ops.batched_nms",
dts_boxes_scores_iou_idxs=_nms_helper(batched=True),
test_with_out=st.just(False),
)
def test_torchvision_batched_nms(
*,
dts_boxes_scores_iou_idxs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dts, boxes, scores, iou, idxs = dts_boxes_scores_iou_idxs
helpers.test_frontend_function(
input_dtypes=dts,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
boxes=boxes,
scores=scores,
idxs=idxs,
iou_threshold=iou,
)
# box_area
@handle_frontend_test(
fn_tree="torchvision.ops.box_area",
boxes=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.tuples(helpers.ints(min_value=1, max_value=5), st.just(4)),
),
)
def test_torchvision_box_area(
*,
boxes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, boxes = boxes
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
boxes=boxes[0],
)
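# box_iou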
@handle_frontend_test(
fn_tree="torchvision.ops.box_iou",
boxes=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.tuples(helpers.ints(min_value=1, max_value=5), st.just(4)),
num_arrays=2,
),
)
def test_torchvision_box_iou(
*,
boxes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, boxes = boxes
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
boxes1=boxes[0],
boxes2=boxes[1],
)
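# clip_boxes_to_image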
@handle_frontend_test(
fn_tree="torchvision.ops.clip_boxes_to_image",
boxes=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.tuples(helpers.ints(min_value=1, max_value=5), st.just(4)),
),
size=st.tuples(
helpers.ints(min_value=1, max_value=256),
helpers.ints(min_value=1, max_value=256),
),
)
def test_torchvision_clip_boxes_to_image(
*,
boxes,
size,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, boxes = boxes
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
boxes=boxes[0],
size=size,
)
# nms
@handle_frontend_test(
fn_tree="torchvision.ops.nms",
dts_boxes_scores_iou=_nms_helper(),
test_with_out=st.just(False),
)
def test_torchvision_nms(
*,
dts_boxes_scores_iou,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dts, boxes, scores, iou, _ = dts_boxes_scores_iou
helpers.test_frontend_function(
input_dtypes=dts,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
boxes=boxes,
scores=scores,
iou_threshold=iou,
)
# remove_small_boxes
@handle_frontend_test(
fn_tree="torchvision.ops.remove_small_boxes",
boxes=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.tuples(helpers.ints(min_value=1, max_value=5), st.just(4)),
),
min_size=helpers.floats(
min_value=0.0,
max_value=10,
small_abs_safety_factor=2,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_torchvision_remove_small_boxes(
*,
boxes,
min_size,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, boxes = boxes
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
boxes=boxes[0],
min_size=min_size,
)
# roi_align
@handle_frontend_test(
fn_tree="torchvision.ops.roi_align",
inputs=_roi_align_helper(),
test_with_out=st.just(False),
)
def test_torchvision_roi_align(
*,
inputs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, input, boxes, output_size, spatial_scale, sampling_ratio, aligned = inputs
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
boxes=boxes,
output_size=output_size,
spatial_scale=spatial_scale,
sampling_ratio=sampling_ratio,
aligned=aligned,
rtol=1e-5,
atol=1e-5,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torchvision/test_ops.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torchvision/test_ops.py",
"repo_id": "ivy",
"token_count": 3845
} | 66 |
"""Collection of tests for unified reduction functions."""
# global
from hypothesis import strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test, BackendHandler
# --- Helpers --- #
# --------------- #
@st.composite
def _gen_randint_data(draw):
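    # draw low in [-100, 25] and high in [26, 100] so that low < high holds
    # elementwise, as ivy.randint requires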
dtype = draw(helpers.get_dtypes("signed_integer", full=False))
dim1 = draw(helpers.ints(min_value=1, max_value=5))
dim2 = draw(helpers.ints(min_value=2, max_value=8))
low = draw(
helpers.array_values(
dtype=dtype[0],
shape=(dim1, dim2),
min_value=-100,
max_value=25,
)
)
high = draw(
helpers.array_values(
dtype=dtype[0],
shape=(dim1, dim2),
min_value=26,
max_value=100,
)
)
return dtype, low, high
@st.composite
def _pop_size_num_samples_replace_n_probs(draw):
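    # when sampling without replacement, at most population_size samples can
    # be requested; with replacement the count is unconstrained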
prob_dtype = draw(helpers.get_dtypes("float", full=False))
batch_size = draw(helpers.ints(min_value=1, max_value=5))
population_size = draw(helpers.ints(min_value=1, max_value=20))
replace = draw(st.booleans())
if replace:
num_samples = draw(helpers.ints(min_value=1, max_value=20))
else:
num_samples = draw(helpers.ints(min_value=1, max_value=population_size))
probs = draw(
helpers.array_values(
dtype=prob_dtype[0],
shape=[batch_size, num_samples],
min_value=1.0013580322265625e-05,
max_value=1.0,
exclude_min=True,
large_abs_safety_factor=2,
safety_factor_scale="linear",
)
)
return prob_dtype, batch_size, population_size, num_samples, replace, probs
# --- Main --- #
# ------------ #
# multinomial
@handle_test(
fn_tree="functional.ivy.multinomial",
everything=_pop_size_num_samples_replace_n_probs(),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
test_instance_method=st.just(False),
)
def test_multinomial(*, everything, seed, test_flags, backend_fw, fn_name, on_device):
prob_dtype, batch_size, population_size, num_samples, replace, probs = everything
def call():
return helpers.test_function(
input_dtypes=prob_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=False,
population_size=population_size,
num_samples=num_samples,
batch_size=batch_size,
probs=probs[0] if probs is not None else probs,
replace=replace,
seed=seed,
)
ret = call()
if not ivy.exists(ret):
return
ret_np, ret_from_np = ret
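    # a fixed seed should reproduce at least part of the draw across calls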
if seed:
ret_np1, ret_from_np1 = call()
assert ivy.any(ret_np == ret_np1)
ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
ret_from_np = helpers.flatten_and_to_np(
ret=ret_from_np, backend=test_flags.ground_truth_backend
)
for u, v in zip(ret_np, ret_from_np):
assert u.dtype == v.dtype
assert u.shape == v.shape
# randint
@handle_test(
fn_tree="functional.ivy.randint",
dtype_low_high=_gen_randint_data(),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
)
def test_randint(*, dtype_low_high, seed, test_flags, backend_fw, fn_name, on_device):
dtype, low, high = dtype_low_high
def call():
return helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=False,
low=low,
high=high,
shape=None,
dtype=dtype[0],
seed=seed,
)
ret, ret_gt = call()
if seed:
ret1, ret_gt1 = call()
assert ivy.any(ret == ret1)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
for u, v in zip(ret, ret_gt):
assert ivy.all(u >= low)
assert ivy.all(u < high)
assert ivy.all(v >= low)
assert ivy.all(v < high)
# random_normal
@handle_test(
fn_tree="functional.ivy.random_normal",
dtype_and_mean=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1000,
max_value=1000,
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
),
dtype_and_std=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=1000,
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
),
dtype=helpers.get_dtypes("float", full=False),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
)
def test_random_normal(
dtype_and_mean,
dtype_and_std,
dtype,
seed,
test_flags,
backend_fw,
fn_name,
on_device,
):
mean_dtype, mean = dtype_and_mean
std_dtype, std = dtype_and_std
def call():
return helpers.test_function(
input_dtypes=mean_dtype + std_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=False,
mean=mean[0],
std=std[0],
shape=None,
dtype=dtype[0],
seed=seed,
)
ret, ret_gt = call()
if seed:
ret1, ret_gt1 = call()
assert ivy.any(ret == ret1)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
for u, v in zip(ret, ret_gt):
assert u.dtype == v.dtype
# random_uniform
@handle_test(
fn_tree="functional.ivy.random_uniform",
dtype_and_low=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1000,
max_value=100,
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
),
dtype_and_high=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=101,
max_value=1000,
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
),
dtype=helpers.get_dtypes("float", full=False),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
)
def test_random_uniform(
*,
dtype_and_low,
dtype_and_high,
dtype,
seed,
test_flags,
backend_fw,
fn_name,
    on_device,
):
low_dtype, low = dtype_and_low
high_dtype, high = dtype_and_high
def call():
return helpers.test_function(
input_dtypes=low_dtype + high_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=False,
low=low[0],
high=high[0],
shape=None,
dtype=dtype[0],
seed=seed,
)
ret, ret_gt = call()
if seed:
        ret1, ret_gt1 = call()
assert ivy.any(ret == ret1)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
for u, v in zip(ret, ret_gt):
assert u.dtype == v.dtype
# seed
@handle_test(
fn_tree="functional.ivy.seed",
seed_val=helpers.ints(min_value=0, max_value=2147483647),
)
def test_seed(seed_val, backend_fw):
# smoke test
with BackendHandler.update_backend(backend_fw) as ivy_backend:
ivy_backend.seed(seed_value=seed_val)
# shuffle
@handle_test(
fn_tree="functional.ivy.shuffle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
allow_inf=False,
min_num_dims=1,
min_dim_size=2,
),
seed=helpers.ints(min_value=0, max_value=100),
axis=helpers.ints(min_value=0, max_value=0),
test_gradients=st.just(False),
)
def test_shuffle(
*, dtype_and_x, seed, axis, test_flags, backend_fw, fn_name, on_device
):
dtype, x = dtype_and_x
def call():
return helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=False,
x=x[0],
axis=axis,
seed=seed,
)
ret, ret_gt = call()
if seed:
ret1, ret_gt1 = call()
assert ivy.any(ret == ret1)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
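    # a shuffle only permutes elements, so the sorted outputs must agree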
for u, v in zip(ret, ret_gt):
assert ivy.all(ivy.sort(u, axis=0) == ivy.sort(v, axis=0))
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_random.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_random.py",
"repo_id": "ivy",
"token_count": 4631
} | 67 |
"""Collection of tests for unified neural network layers."""
# global
from hypothesis import strategies as st, assume
import ivy
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
from ivy.functional.ivy.layers import _deconv_length, _pack_padded_sequence
# --- Helpers --- #
# --------------- #
def _assume_tf_dilation_gt_1(backend_fw, on_device, dilations):
    # tensorflow does not support dilations > 1 on cpu, so skip such draws
    if backend_fw == "tensorflow":
        dilations_gt_1 = (
            dilations > 1
            if isinstance(dilations, int)
            else any(d > 1 for d in dilations)
        )
        assume(not (on_device == "cpu" and dilations_gt_1))
# Dropout #
# --------#
@st.composite
def _dropout_helper(draw):
mixed_fn_compos = draw(st.booleans())
is_torch_backend = ivy.current_backend_str() == "torch"
shape = draw(helpers.get_shape(min_num_dims=1))
dtype = draw(
helpers.get_dtypes("float", full=False, mixed_fn_compos=mixed_fn_compos)
)
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"float", mixed_fn_compos=mixed_fn_compos
),
shape=shape,
)
)
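    # a noise_shape entry of 1 broadcasts the dropout mask along that axis,
    # while None falls back to the input's size on that axis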
noise_shape = list(shape)
if draw(st.booleans()):
noise_shape = None
else:
for i, _ in enumerate(noise_shape):
if draw(st.booleans()):
noise_shape[i] = 1
elif draw(st.booleans()):
noise_shape[i] = None
seed = draw(helpers.ints(min_value=0, max_value=100))
prob = draw(helpers.floats(min_value=0, max_value=0.9))
scale = draw(st.booleans())
training = draw(st.booleans())
if is_torch_backend and not mixed_fn_compos:
noise_shape = None
seed = None
return dtype_and_x, noise_shape, seed, dtype, prob, scale, training
@st.composite
def _general_transpose_helper(draw):
dims = draw(st.integers(1, 3))
padding = st.sampled_from(["SAME", "VALID"]) if dims != 2 else None
x_f_d_df = draw(
_x_and_filters(
dim=dims,
general=True,
transpose=True,
bias=True,
padding=padding,
)
)
return dims, x_f_d_df
@st.composite
def _lstm_helper(draw):
dtype = draw(helpers.get_dtypes("float", full=False))
has_ih_bias = draw(st.booleans())
has_hh_bias = draw(st.booleans())
weights_transposed = draw(st.booleans())
bidirectional = draw(st.booleans())
dropout = draw(st.floats(min_value=0, max_value=0.99))
train = draw(st.booleans()) and not dropout
packed = draw(st.booleans())
batch_first = draw(st.booleans()) and not packed
num_batches = draw(st.integers(min_value=1, max_value=5))
num_layers = draw(st.integers(min_value=1, max_value=3))
num_directions = 2 if bidirectional else 1
seq_size = draw(st.integers(min_value=1, max_value=5))
in_size = draw(st.integers(min_value=1, max_value=3))
hidden_size = draw(st.integers(min_value=1, max_value=3))
input = draw(
helpers.array_values(
dtype=dtype[0],
shape=(
(num_batches, seq_size, in_size)
if batch_first
else (seq_size, num_batches, in_size)
),
min_value=0,
max_value=1,
)
)
init_h = draw(
helpers.array_values(
dtype=dtype[0],
shape=(num_directions * num_layers, num_batches, hidden_size),
min_value=0,
max_value=1,
)
)
init_c = draw(
helpers.array_values(
dtype=dtype[0],
shape=(num_directions * num_layers, num_batches, hidden_size),
min_value=0,
max_value=1,
)
)
all_weights = []
for k in range(num_layers):
for _ in range(num_directions):
weight_ih = draw(
helpers.array_values(
dtype=dtype[0],
shape=(
(4 * hidden_size, in_size)
if k == 0
else (4 * hidden_size, num_directions * hidden_size)
),
min_value=0,
max_value=1,
)
)
weight_hh = draw(
helpers.array_values(
dtype=dtype[0],
shape=(4 * hidden_size, hidden_size),
min_value=0,
max_value=1,
)
)
all_weights += [weight_ih, weight_hh]
if has_ih_bias:
bias_ih = draw(
helpers.array_values(
dtype=dtype[0],
shape=(4 * hidden_size,),
min_value=0,
max_value=1,
)
)
all_weights.append(bias_ih)
if has_hh_bias:
bias_hh = draw(
helpers.array_values(
dtype=dtype[0],
shape=(4 * hidden_size,),
min_value=0,
max_value=1,
)
)
all_weights.append(bias_hh)
    if weights_transposed:
        # swap the axes of every 2-D weight matrix so the helper receives
        # transposed weights; bias vectors are left untouched
        all_weights = [
            ivy.swapaxes(w, 0, 1) if ivy.get_num_dims(w) == 2 else w
            for w in all_weights
        ]
if packed:
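        # emulate torch-style packing: draw per-sequence lengths (at least
        # one spanning the full sequence) and pack the padded input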
batch_sizes = [seq_size]
batch_sizes += draw(
st.lists(
st.integers(min_value=1, max_value=seq_size),
min_size=num_batches - 1,
max_size=num_batches - 1,
)
)
batch_sizes = np.array(draw(st.permutations(batch_sizes)))
input, batch_sizes = (
ivy.to_numpy(p) for p in _pack_padded_sequence(input, batch_sizes)
)
else:
batch_sizes = None
initial_states = init_h, init_c
all_weights = tuple(all_weights)
if batch_sizes is not None:
dtypes = dtype + ["int64"]
kwargs = {
"input": input,
"batch_sizes": batch_sizes,
"initial_states": initial_states,
"all_weights": all_weights,
"num_layers": num_layers,
"dropout": dropout,
"train": train,
"bidirectional": bidirectional,
"weights_transposed": weights_transposed,
"has_ih_bias": has_ih_bias,
"has_hh_bias": has_hh_bias,
}
else:
dtypes = dtype
kwargs = {
"input": input,
"initial_states": initial_states,
"all_weights": all_weights,
"num_layers": num_layers,
"dropout": dropout,
"train": train,
"bidirectional": bidirectional,
"batch_first": batch_first,
"weights_transposed": weights_transposed,
"has_ih_bias": has_ih_bias,
"has_hh_bias": has_hh_bias,
}
return dtypes, kwargs
@st.composite
def _mha_helper(draw, same_pre_embed_dim=False, batch_second=False):
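    # draws a complete multi-head attention setup: self- vs cross-attention,
    # a packed in_proj weight when q, k and v share their last dimension, or
    # separate q/k/v projection weights otherwise, plus optional biases,
    # masks and static key/value tensors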
_qkv_same_dim = draw(st.booleans())
_self_attention = draw(st.booleans())
_same_pre_embed_dim = _self_attention or same_pre_embed_dim or draw(st.booleans())
batch_first = draw(st.booleans()) and not batch_second
num_heads = draw(helpers.ints(min_value=1, max_value=3))
_embed_dim = draw(helpers.ints(min_value=4, max_value=16)) * num_heads
_batch_dim = draw(st.sampled_from([(), (1,)]))
_num_batches = _batch_dim[0] if len(_batch_dim) else 1
dtype = draw(helpers.get_dtypes("valid", full=False))
_num_queries = draw(helpers.ints(min_value=2, max_value=8))
_num_keys = draw(helpers.ints(min_value=2, max_value=8))
in_proj_weights = None
q_proj_weights = None
k_proj_weights = None
v_proj_weights = None
if _qkv_same_dim:
if _same_pre_embed_dim:
_pre_embed_dim = _embed_dim
else:
_pre_embed_dim = draw(helpers.ints(min_value=4, max_value=16))
q = draw(
helpers.array_values(
shape=(*_batch_dim, _num_queries, _pre_embed_dim),
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
)
)
k = draw(
helpers.array_values(
shape=(*_batch_dim, _num_keys, _pre_embed_dim),
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
)
if not _self_attention
else st.none()
)
v = draw(
helpers.array_values(
shape=(*_batch_dim, _num_keys, _pre_embed_dim),
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
)
if not _self_attention
else st.none()
)
in_proj_weights = draw(
helpers.array_values(
dtype=dtype[0],
shape=(3 * _embed_dim, _pre_embed_dim),
min_value=-10,
max_value=10,
)
if not _same_pre_embed_dim or draw(st.booleans())
else st.none()
)
else:
if not same_pre_embed_dim:
_q_dim = draw(helpers.ints(min_value=2, max_value=8))
else:
_q_dim = _embed_dim
_k_dim = draw(helpers.ints(min_value=2, max_value=8))
_v_dim = draw(helpers.ints(min_value=2, max_value=8))
q = draw(
helpers.array_values(
shape=(*_batch_dim, _num_queries, _q_dim),
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
)
)
k = draw(
helpers.array_values(
shape=(*_batch_dim, _num_keys, _k_dim),
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
)
)
v = draw(
helpers.array_values(
shape=(*_batch_dim, _num_keys, _v_dim),
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
)
)
q_proj_weights = draw(
helpers.array_values(
dtype=dtype[0],
shape=(_embed_dim, _q_dim),
min_value=-5,
max_value=5,
)
)
k_proj_weights = draw(
helpers.array_values(
dtype=dtype[0],
shape=(_embed_dim, _k_dim),
min_value=-5,
max_value=5,
)
)
v_proj_weights = draw(
helpers.array_values(
dtype=dtype[0],
shape=(_embed_dim, _v_dim),
min_value=-5,
max_value=5,
)
)
in_proj_bias = draw(
st.one_of(
helpers.array_values(
dtype=dtype[0],
shape=(3 * _embed_dim,),
min_value=-10,
max_value=10,
),
st.none(),
)
)
_out_dim = draw(helpers.ints(min_value=4, max_value=16))
out_proj_weights = draw(
st.one_of(
helpers.array_values(
dtype=dtype[0],
shape=(_out_dim, _embed_dim),
min_value=-5,
max_value=5,
),
st.none(),
)
)
out_proj_bias = draw(
st.one_of(
helpers.array_values(
dtype=dtype[0],
shape=(_out_dim,),
min_value=-10,
max_value=10,
),
st.none(),
)
)
if _self_attention and _qkv_same_dim:
_num_keys = _num_queries
_static_shape = (_num_batches * num_heads, _num_keys, int(_embed_dim // num_heads))
static_k = draw(
st.one_of(
helpers.array_values(
shape=_static_shape,
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
),
st.none(),
)
)
static_v = draw(
st.one_of(
helpers.array_values(
shape=_static_shape,
dtype=dtype[0],
max_value=1000,
min_value=-1000,
abs_smallest_val=1e-06,
),
st.none(),
)
)
_mask_shape = (_num_queries, _num_keys)
if len(_batch_dim) and draw(st.booleans()):
_mask_shape = (_num_batches * num_heads, *_mask_shape)
attention_mask = draw(
st.one_of(
helpers.array_values(
dtype=draw(st.sampled_from(["bool", dtype[0]])),
allow_inf=True,
shape=_mask_shape,
),
st.none(),
)
)
key_padding_mask = draw(
st.one_of(
helpers.array_values(
dtype="bool",
shape=(*_batch_dim, _num_keys),
),
st.none(),
)
)
_extra_bias = (
(not _qkv_same_dim or _pre_embed_dim == _embed_dim)
and static_k is None
and static_v is None
and draw(st.booleans())
)
bias_k = draw(
helpers.array_values(
dtype=dtype[0], shape=(_embed_dim,), min_value=-10, max_value=10
)
if _extra_bias
else st.none()
)
bias_v = draw(
helpers.array_values(
dtype=dtype[0], shape=(_embed_dim,), min_value=-10, max_value=10
)
if _extra_bias
else st.none()
)
scale = draw(st.one_of(st.floats(min_value=0.001), st.none()))
add_zero_attn = draw(st.booleans())
dropout = draw(st.floats(min_value=0, max_value=0.99))
training = draw(st.booleans())
is_causal = draw(st.booleans())
return_attention_weights = draw(st.booleans())
average_attention_weights = draw(st.booleans())
if len(q.shape) == 3 and not batch_first:
q, k, v = (np.swapaxes(x, 0, 1) if x is not None else x for x in [q, k, v])
ret = (
q,
k,
v,
num_heads,
attention_mask,
in_proj_weights,
q_proj_weights,
k_proj_weights,
v_proj_weights,
out_proj_weights,
in_proj_bias,
out_proj_bias,
key_padding_mask,
bias_k,
bias_v,
static_k,
static_v,
scale,
add_zero_attn,
dropout,
training,
is_causal,
return_attention_weights,
average_attention_weights,
batch_first,
)
ret_dtypes = [str(r.dtype) for r in ret if ivy.is_array(r)]
return ret_dtypes, *ret
@st.composite
def _nms_helper(draw):
img_width = draw(st.integers(250, 1250))
img_height = draw(st.integers(250, 1250))
num_boxes = draw(st.integers(5, 50))
bbox = {}
for _ in range(num_boxes):
x1 = draw(st.integers(0, img_width - 20))
w = draw(st.integers(5, img_width - x1))
y1 = draw(st.integers(0, img_height - 20))
h = draw(st.integers(5, img_height - y1))
bbox[(x1, y1, x1 + w, y1 + h)] = draw(st.floats(0.2, 1))
iou_threshold = draw(st.floats(0.2, 1))
max_output_size = draw(st.integers(1, num_boxes))
score_threshold = draw(st.floats(0, 1))
return (
np.array(list(bbox.keys()), dtype=np.float32),
np.array(list(bbox.values()), dtype=np.float32),
iou_threshold,
max_output_size,
score_threshold,
)
# Convolutions #
# -------------#
def _output_shape(dims, dilation, stride, padding, x_shape, filter_shape):
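    # for string padding defer to _deconv_length; otherwise apply the
    # transposed-conv formula per spatial dim:
    #   (x - 1) * stride - pad_before - pad_after + dilation * (filter - 1) + 1
    # e.g. x=4, stride=2, padding=(1, 1), dilation=1, filter=3 gives
    #   (4 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 7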
if isinstance(padding, str):
return [
_deconv_length(
x_shape[i],
stride[i],
filter_shape[i],
padding,
dilation[i],
)
for i in range(dims)
]
else:
if isinstance(padding, int):
padding = [[padding, padding]] * dims
return [
(x_shape[i] - 1) * stride[i]
- padding[i][0]
- padding[i][1]
+ dilation[i] * (filter_shape[i] - 1)
+ 1
for i in range(dims)
]
@st.composite
def _roi_align_helper(draw):
dtype = draw(helpers.get_dtypes("float", full=False))[0]
N = draw(st.integers(1, 5))
C = draw(st.integers(1, 5))
H = W = draw(st.integers(5, 20))
img_width = img_height = draw(st.integers(50, 100))
spatial_scale = H / img_height
output_size = draw(st.integers(H - 2, H + 5))
sampling_ratio = draw(st.one_of(st.just(-1), st.integers(1, 3)))
aligned = draw(st.booleans())
input = draw(
helpers.array_values(
dtype=dtype,
shape=(N, C, H, W),
min_value=-3,
max_value=3,
)
)
bbox = {}
for i in range(N):
num_boxes = draw(st.integers(1, 5))
for _ in range(num_boxes):
x1 = draw(st.integers(0, img_width - 20))
w = draw(st.integers(5, img_width - x1))
y1 = draw(st.integers(0, img_height - 20))
h = draw(st.integers(5, img_height - y1))
bbox[(i, x1, y1, x1 + w, y1 + h)] = 1
return (
[dtype],
input,
np.array(list(bbox.keys()), dtype=dtype).reshape((-1, 5)),
output_size,
spatial_scale,
sampling_ratio,
aligned,
)
@st.composite
def _x_and_filters(
draw,
dim: int = 2,
padding=None,
transpose: bool = False,
depthwise=False,
general=False,
bias=False,
):
if not isinstance(dim, int):
dim = draw(dim)
batch_size = draw(st.integers(1, 5))
filter_shape = draw(
helpers.get_shape(
min_num_dims=dim, max_num_dims=dim, min_dim_size=1, max_dim_size=5
)
)
dtype = draw(helpers.get_dtypes("float", full=False))
input_channels = draw(st.integers(1, 3))
output_channels = draw(st.integers(1, 3))
group_list = [*range(1, 6)]
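    # feature_group_count candidates: for a standard conv the count must
    # divide the input channels and not exceed the output channels; for a
    # transposed conv it must divide the output channels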
if not transpose:
group_list = list(
filter(
lambda x: (input_channels % x == 0 and x <= output_channels), group_list
)
)
else:
group_list = list(filter(lambda x: (output_channels % x == 0), group_list))
fc = draw(st.sampled_from(group_list))
strides = draw(
st.one_of(
st.integers(1, 3), st.lists(st.integers(1, 3), min_size=dim, max_size=dim)
)
if dim > 1
else st.integers(1, 3)
)
dilations = draw(
st.one_of(
st.integers(1, 3), st.lists(st.integers(1, 3), min_size=dim, max_size=dim)
)
if dim > 1
else st.integers(1, 3)
)
if dim == 2:
data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
elif dim == 1:
data_format = draw(st.sampled_from(["NWC", "NCW"]))
else:
data_format = draw(st.sampled_from(["NDHWC", "NCDHW"]))
fdilations = [dilations] * dim if isinstance(dilations, int) else dilations
if padding is None:
padding = st.one_of(
st.lists(
st.tuples(
st.integers(min_value=0, max_value=3),
st.integers(min_value=0, max_value=3),
),
min_size=dim,
max_size=dim,
),
st.sampled_from(["SAME", "VALID"]),
st.integers(min_value=0, max_value=3),
)
padding = draw(padding)
if transpose:
fstrides = [strides] * dim if isinstance(strides, int) else strides
if isinstance(padding, list):
assume(
all(
max(pad) - min(pad) < min(stride, dilation)
for pad, stride, dilation in zip(padding, fstrides, fdilations)
)
)
x_dim = draw(
helpers.get_shape(
min_num_dims=dim, max_num_dims=dim, min_dim_size=1, max_dim_size=5
)
)
output_shape = _output_shape(
dim, fdilations, fstrides, padding, x_dim, filter_shape
)
assume(all(s > 0 for s in output_shape))
if draw(st.booleans()):
output_shape = None
else:
x_dim = []
for i in range(dim):
min_x = filter_shape[i] + (filter_shape[i] - 1) * (fdilations[i] - 1)
x_dim.append(draw(st.integers(min_x, min_x + 1)))
x_dim = tuple(x_dim)
if not depthwise:
if not transpose:
output_channels = output_channels * fc
filter_shape = filter_shape + (input_channels // fc, output_channels)
else:
input_channels = input_channels * fc
filter_shape = filter_shape + (output_channels // fc, input_channels)
else:
filter_shape = filter_shape + (input_channels,)
channel_first = True
if data_format in ["NHWC", "NWC", "NDHWC"]:
x_shape = (batch_size,) + x_dim + (input_channels,)
channel_first = False
else:
x_shape = (batch_size, input_channels) + x_dim
vals = draw(
helpers.array_values(
dtype=dtype[0],
shape=x_shape,
min_value=0.0,
max_value=1.0,
)
)
filters = draw(
helpers.array_values(
dtype=dtype[0],
shape=filter_shape,
min_value=0.0,
max_value=1.0,
)
)
if bias:
bias_shape = (output_channels,)
b = draw(
helpers.array_values(
dtype=dtype[0],
shape=bias_shape,
min_value=0.0,
max_value=1.0,
)
)
if general:
data_format = "channel_first" if channel_first else "channel_last"
if not transpose:
x_dilation = draw(
st.one_of(
st.integers(1, 3),
st.lists(st.integers(1, 3), min_size=dim, max_size=dim),
)
)
dilations = (dilations, x_dilation)
if not depthwise:
filter_format = draw(st.sampled_from(["channel_first", "channel_last"]))
if filter_format == "channel_first":
filters = np.transpose(filters, (-1, -2, *range(dim)))
ret = (
dtype,
vals,
filters,
dilations,
data_format,
strides,
padding,
)
ret = ret + (output_shape, fc) if transpose else ret + (fc,)
if not depthwise:
ret = ret + (filter_format,)
if bias:
return ret + (b,)
return ret
# output_shape is only a parameter of the transpose path; conv_general_dilated
# does not accept it
@st.composite
def _x_and_filters_and_transpose(
draw,
dim: int = 2,
general=False,
bias=False,
):
transpose = draw(st.booleans())
all_args = draw(
_x_and_filters(
dim=dim,
general=general,
bias=bias,
transpose=transpose,
)
)
output_shape = None
if transpose:
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
output_shape,
fc,
filter_format,
bias,
) = all_args
else:
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
fc,
filter_format,
bias,
) = all_args
return (
dtype,
x,
filters,
stride,
pad,
transpose,
output_shape,
data_format,
filter_format,
fc,
dilations,
bias,
)
# Linear #
# -------#
@st.composite
def _x_and_linear(draw):
mixed_fn_compos = draw(st.booleans())
is_torch_backend = ivy.current_backend_str() == "torch"
dtype = draw(
# should sample from "valid" but with_supported_dtypes was not working
helpers.get_dtypes("float", full=False, mixed_fn_compos=mixed_fn_compos)
)
in_features = draw(
helpers.ints(min_value=1, max_value=2, mixed_fn_compos=mixed_fn_compos)
)
out_features = draw(
helpers.ints(min_value=1, max_value=2, mixed_fn_compos=mixed_fn_compos)
)
x_shape = (
1,
1,
in_features,
)
weight_shape = (1,) + (out_features,) + (in_features,)
# if backend is torch and we're testing the primary implementation
# weight.ndim should be equal to 2
if is_torch_backend and not mixed_fn_compos:
weight_shape = (out_features,) + (in_features,)
bias_shape = (
1,
out_features,
)
x = draw(
helpers.array_values(dtype=dtype[0], shape=x_shape, min_value=0, max_value=10)
)
weight = draw(
helpers.array_values(
dtype=dtype[0], shape=weight_shape, min_value=0, max_value=10
)
)
bias = draw(
helpers.array_values(
dtype=dtype[0], shape=bias_shape, min_value=0, max_value=10
)
)
return dtype, x, weight, bias
# LSTM #
# -----#
@st.composite
def _x_and_lstm(draw, dtypes):
dtype = draw(dtypes)
batch_shape = (1,)
t = draw(helpers.ints(min_value=1, max_value=2))
_in_ = draw(helpers.ints(min_value=1, max_value=2))
_out_ = draw(helpers.ints(min_value=1, max_value=2))
x_lstm_shape = batch_shape + (t,) + (_in_,)
init_h_shape = batch_shape + (_out_,)
init_c_shape = init_h_shape
kernel_shape = (_in_,) + (4 * _out_,)
recurrent_kernel_shape = (_out_,) + (4 * _out_,)
bias_shape = (4 * _out_,)
recurrent_bias_shape = bias_shape
x_lstm = draw(
helpers.array_values(
dtype=dtype[0], shape=x_lstm_shape, min_value=0, max_value=1
)
)
init_h = draw(
helpers.array_values(
dtype=dtype[0], shape=init_h_shape, min_value=0, max_value=1
)
)
init_c = draw(
helpers.array_values(
dtype=dtype[0], shape=init_c_shape, min_value=0, max_value=1
)
)
kernel = draw(
helpers.array_values(
dtype=dtype[0], shape=kernel_shape, min_value=0, max_value=1
)
)
recurrent_kernel = draw(
helpers.array_values(
dtype=dtype[0], shape=recurrent_kernel_shape, min_value=0, max_value=1
)
)
lstm_bias = draw(
helpers.array_values(dtype=dtype[0], shape=bias_shape, min_value=0, max_value=1)
)
recurrent_bias = draw(
helpers.array_values(
dtype=dtype[0], shape=recurrent_bias_shape, min_value=0, max_value=1
)
)
return (
dtype,
x_lstm,
init_h,
init_c,
kernel,
recurrent_kernel,
lstm_bias,
recurrent_bias,
)
# Attention #
# ----------#
@st.composite
def _x_and_scaled_attention(draw, dtypes):
dtype = draw(dtypes)
num_queries = draw(helpers.ints(min_value=2, max_value=4))
num_keys = draw(helpers.ints(min_value=2, max_value=4))
feat_dim = draw(helpers.ints(min_value=2, max_value=4))
batch_size = draw(helpers.ints(min_value=1, max_value=2))
q_shape = (batch_size,) + (num_queries,) + (feat_dim,)
k_shape = (batch_size,) + (num_keys,) + (feat_dim,)
v_shape = (batch_size,) + (num_keys,) + (feat_dim,)
mask_shape = (batch_size,) + (num_queries,) + (num_keys,)
query = draw(
helpers.array_values(
dtype=dtype[0],
shape=q_shape,
min_value=1e-3,
max_value=1e2,
large_abs_safety_factor=7,
small_abs_safety_factor=7,
safety_factor_scale="linear",
)
)
key = draw(
helpers.array_values(
dtype=dtype[0],
shape=k_shape,
min_value=1e-3,
max_value=1e2,
large_abs_safety_factor=7,
small_abs_safety_factor=7,
safety_factor_scale="linear",
)
)
value = draw(
helpers.array_values(
dtype=dtype[0],
shape=v_shape,
min_value=1e-3,
max_value=1e2,
large_abs_safety_factor=7,
small_abs_safety_factor=7,
safety_factor_scale="linear",
)
)
mask = draw(
helpers.array_values(
dtype="bool",
shape=mask_shape,
)
| st.none()
)
return dtype, query, key, value, mask
# --- Main --- #
# ------------ #
# conv
@handle_test(
fn_tree="functional.ivy.conv",
dims=st.shared(st.integers(1, 3), key="dims"),
x_f_d_df_tr=_x_and_filters_and_transpose(
dim=st.shared(st.integers(1, 3), key="dims"),
general=True,
bias=True,
),
ground_truth_backend="jax",
)
def test_conv(*, dims, x_f_d_df_tr, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x,
filters,
stride,
pad,
transpose,
output_shape,
data_format,
filter_format,
fc,
dilations,
bias,
) = x_f_d_df_tr
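    # in the non-transposed case, dilations packs a (dilations, x_dilations) pair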
tf_dilations = dilations
if not transpose:
tf_dilations = tf_dilations[0]
dilations, x_dilations = dilations
else:
x_dilations = None
_assume_tf_dilation_gt_1(backend_fw, on_device, tf_dilations)
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x,
filters=filters,
strides=stride,
padding=pad,
transpose=transpose,
dims=dims,
output_shape=output_shape,
data_format=data_format,
filter_format=filter_format,
feature_group_count=fc,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
)
# conv1d
@handle_test(
fn_tree="functional.ivy.conv1d",
x_f_d_df=_x_and_filters(
dim=1,
bias=True,
),
ground_truth_backend="jax",
)
def test_conv1d(*, x_f_d_df, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
fc,
ff_format,
bias,
) = x_f_d_df
# ToDo: Enable gradient tests for dilations > 1 when tensorflow supports it.
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations[0])
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
x=x,
filters=filters,
strides=stride,
padding=pad,
data_format=data_format,
filter_format=ff_format,
x_dilations=dilations[1],
dilations=dilations[0],
bias=bias,
)
# conv1d_transpose
@handle_test(
fn_tree="functional.ivy.conv1d_transpose",
x_f_d_df=_x_and_filters(
dim=1,
transpose=True,
bias=True,
padding=st.sampled_from(["SAME", "VALID"]),
),
ground_truth_backend="torch",
)
def test_conv1d_transpose(*, x_f_d_df, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
output_shape,
fc,
filter_format,
bias,
) = x_f_d_df
# tensorflow does not work with dilations > 1 on cpu
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations)
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x,
filters=filters,
strides=stride,
padding=pad,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
)
# conv2d
@handle_test(
fn_tree="functional.ivy.conv2d",
x_f_d_df=_x_and_filters(
dim=2,
bias=True,
),
ground_truth_backend="jax",
)
def test_conv2d(*, x_f_d_df, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
fc,
ff_format,
bias,
) = x_f_d_df
# ToDo: Enable gradient tests for dilations > 1 when tensorflow supports it.
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations[0])
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x,
filters=filters,
strides=stride,
padding=pad,
data_format=data_format,
filter_format=ff_format,
x_dilations=dilations[1],
dilations=dilations[0],
bias=bias,
)
# conv2d_transpose
@handle_test(
fn_tree="functional.ivy.conv2d_transpose",
x_f_d_df=_x_and_filters(
dim=2,
transpose=True,
bias=True,
),
ground_truth_backend="torch",
)
def test_conv2d_transpose(*, x_f_d_df, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
output_shape,
fc,
filter_format,
bias,
) = x_f_d_df
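    # explicit (non-string) padding for transpose convs is only exercised on
    # torch and tensorflow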
assume(isinstance(pad, str) or backend_fw in ["torch", "tensorflow"])
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations)
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
rtol_=1e-2,
atol_=1e-2,
on_device=on_device,
x=x,
filters=filters,
strides=stride,
padding=pad,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
)
# conv3d
@handle_test(
fn_tree="functional.ivy.conv3d",
x_f_d_df=_x_and_filters(
dim=3,
bias=True,
),
ground_truth_backend="jax",
)
def test_conv3d(*, x_f_d_df, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
fc,
ff_format,
bias,
) = x_f_d_df
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations[0])
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x,
filters=filters,
strides=stride,
padding=pad,
data_format=data_format,
filter_format=ff_format,
x_dilations=dilations[1],
dilations=dilations[0],
bias=bias,
)
# conv3d_transpose
@handle_test(
fn_tree="functional.ivy.conv3d_transpose",
x_f_d_df=_x_and_filters(
dim=3,
transpose=True,
bias=True,
padding=st.sampled_from(["SAME", "VALID"]),
),
ground_truth_backend="torch",
)
def test_conv3d_transpose(*, x_f_d_df, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
output_shape,
fc,
filter_format,
bias,
) = x_f_d_df
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations)
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x,
filters=filters,
strides=stride,
padding=pad,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
)
# conv_general_dilated
@handle_test(
fn_tree="functional.ivy.conv_general_dilated",
dims=st.shared(st.integers(1, 3), key="dims"),
x_f_d_df=_x_and_filters(
dim=st.shared(st.integers(1, 3), key="dims"),
general=True,
bias=True,
),
ground_truth_backend="torch",
)
def test_conv_general_dilated(
*, dims, x_f_d_df, test_flags, backend_fw, fn_name, on_device
):
(
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
fc,
ff_format,
bias,
) = x_f_d_df
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations[0])
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x,
filters=filters,
strides=stride,
padding=pad,
dims=dims,
data_format=data_format,
filter_format=ff_format,
feature_group_count=fc,
x_dilations=dilations[1],
dilations=dilations[0],
bias=bias,
)
@handle_test(
fn_tree="functional.ivy.conv_general_transpose",
dim_x_f_d_df=_general_transpose_helper(),
ground_truth_backend="torch",
)
def test_conv_general_transpose(
*, dim_x_f_d_df, test_flags, backend_fw, fn_name, on_device
):
dims, (
dtype,
x,
filters,
dilations,
data_format,
stride,
pad,
output_shape,
fc,
filter_format,
bias,
) = dim_x_f_d_df
assume(isinstance(pad, str) or backend_fw in ["torch", "tensorflow"])
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations)
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-1,
atol_=1e-1,
x=x,
filters=filters,
strides=stride,
padding=pad,
dims=dims,
filter_format=filter_format,
data_format=data_format,
output_shape=output_shape,
dilations=dilations,
feature_group_count=fc,
bias=bias,
)
# depthwise_conv2d
@handle_test(
fn_tree="functional.ivy.depthwise_conv2d",
x_f_d_df=_x_and_filters(
dim=2,
depthwise=True,
),
# tensorflow does not support dilations > 1 and stride > 1
ground_truth_backend="jax",
)
def test_depthwise_conv2d(*, x_f_d_df, test_flags, backend_fw, fn_name, on_device):
dtype, x, filters, dilations, data_format, stride, pad, fc = x_f_d_df
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations[0])
# tensorflow only supports equal length strides in row and column
if backend_fw == "tensorflow" and isinstance(stride, list) and len(stride) > 1:
assume(stride[0] == stride[1])
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x,
filters=filters,
strides=stride,
padding=pad,
data_format=data_format,
dilations=dilations[0],
)
# dropout
@handle_test(
fn_tree="functional.ivy.dropout",
data=_dropout_helper(),
test_gradients=st.just(False),
)
def test_dropout(
*,
data,
test_flags,
backend_fw,
fn_name,
on_device,
):
(x_dtype, x), noise_shape, seed, dtype, prob, scale, training = data
if not training or prob == 0:
helpers.test_function(
input_dtypes=x_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
prob=prob,
scale=scale,
noise_shape=noise_shape,
dtype=dtype[0],
training=training,
seed=seed,
)
else:
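        # dropout is stochastic when training with prob > 0, so skip the value
        # comparison and only check the output shapes below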
ret, gt_ret = helpers.test_function(
input_dtypes=x_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
test_values=False,
x=x[0],
prob=prob,
scale=scale,
noise_shape=noise_shape,
dtype=dtype[0],
training=training,
seed=seed,
)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
gt_ret = helpers.flatten_and_to_np(
ret=gt_ret, backend=test_flags.ground_truth_backend
)
for u, v, w in zip(ret, gt_ret, x):
# cardinality test
assert u.shape == v.shape == w.shape
# linear
@handle_test(
fn_tree="functional.ivy.linear",
dtype_x_weight_bias=_x_and_linear(),
)
def test_linear(*, dtype_x_weight_bias, test_flags, backend_fw, fn_name, on_device):
dtype, x, weight, bias = dtype_x_weight_bias
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
x=x,
weight=weight,
bias=bias,
)
# TODO: fix this test
# lstm
# @handle_test(
# fn_tree="functional.ivy.lstm",
# dtypes_kwargs=_lstm_helper(),
# ground_truth_backend="torch",
# test_with_out=st.just(False),
# )
# def test_lstm(*, dtypes_kwargs, test_flags, backend_fw, fn_name, on_device):
# dtypes, kwargs = dtypes_kwargs
# assume("batch_sizes" not in kwargs)
# helpers.test_function(
# input_dtypes=dtypes,
# test_flags=test_flags,
# backend_to_test=backend_fw,
# fn_name=fn_name,
# on_device=on_device,
# rtol_=1e-01,
# atol_=1e-01,
# **kwargs,
# )
# lstm_update
@handle_test(
fn_tree="functional.ivy.lstm_update",
dtype_lstm=_x_and_lstm(
dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_lstm_update(*, dtype_lstm, test_flags, backend_fw, fn_name, on_device):
(
dtype,
x_lstm,
init_h,
init_c,
kernel,
recurrent_kernel,
bias,
recurrent_bias,
) = dtype_lstm
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-01,
atol_=1e-01,
x=x_lstm,
init_h=init_h,
init_c=init_c,
kernel=kernel,
recurrent_kernel=recurrent_kernel,
bias=bias,
recurrent_bias=recurrent_bias,
)
# multi_head_attention
@handle_test(
fn_tree="functional.ivy.multi_head_attention",
dtype_mha=_mha_helper(),
ground_truth_backend="numpy",
# ToDo: fix the gradients and the container methods
test_gradients=st.just(False),
container_flags=st.just([False]),
)
def test_multi_head_attention(
*,
dtype_mha,
test_flags,
backend_fw,
fn_name,
on_device,
):
(
dtype,
q,
k,
v,
num_heads,
attention_mask,
in_proj_weights,
q_proj_weights,
k_proj_weights,
v_proj_weights,
out_proj_weights,
in_proj_bias,
out_proj_bias,
key_padding_mask,
bias_k,
bias_v,
static_k,
static_v,
scale,
add_zero_attn,
dropout,
training,
is_causal,
return_attention_weights,
average_attention_weights,
batch_first,
) = dtype_mha
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
test_values=(dropout == 0),
atol_=1e-02,
rtol_=1e-02,
query=q,
key=k,
value=v,
batch_first=batch_first,
num_heads=num_heads,
scale=scale,
attention_mask=attention_mask,
in_proj_weights=in_proj_weights,
q_proj_weights=q_proj_weights,
k_proj_weights=k_proj_weights,
v_proj_weights=v_proj_weights,
out_proj_weights=out_proj_weights,
in_proj_bias=in_proj_bias,
out_proj_bias=out_proj_bias,
is_causal=is_causal,
key_padding_mask=key_padding_mask,
bias_k=bias_k,
bias_v=bias_v,
static_k=static_k,
static_v=static_v,
add_zero_attn=add_zero_attn,
return_attention_weights=return_attention_weights,
average_attention_weights=average_attention_weights,
dropout=dropout,
training=training,
)
@handle_test(
fn_tree="functional.ivy.nms",
inputs=_nms_helper(),
test_instance_method=st.just(False),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_nms(
*,
inputs,
test_flags,
backend_fw,
fn_name,
on_device,
):
boxes, scores, iou_threshold, max_output_size, score_threshold = inputs
helpers.test_function(
input_dtypes=[ivy.float32, ivy.float32],
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
boxes=boxes,
scores=scores,
iou_threshold=iou_threshold,
max_output_size=max_output_size,
score_threshold=score_threshold,
)
@handle_test(
fn_tree="functional.ivy.roi_align",
inputs=_roi_align_helper(),
test_instance_method=st.just(False),
test_with_out=st.just(False),
ground_truth_backend="torch",
)
def test_roi_align(
*,
inputs,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtypes, input, boxes, output_size, spatial_scale, sampling_ratio, aligned = inputs
helpers.test_function(
input_dtypes=dtypes,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
input=input,
boxes=boxes,
output_size=output_size,
spatial_scale=spatial_scale,
sampling_ratio=sampling_ratio,
aligned=aligned,
rtol_=1e-5,
atol_=1e-5,
)
# scaled_dot_product_attention
@handle_test(
fn_tree="functional.ivy.scaled_dot_product_attention",
dtype_q_k_v_mask=_x_and_scaled_attention(
dtypes=helpers.get_dtypes("float", full=False),
),
scale=st.floats(min_value=0.1, max_value=1),
dropout_p=st.floats(min_value=0, max_value=0.99),
is_causal=st.booleans(),
training=st.just(False), # st.booleans(), disabled until proper testing is used
ground_truth_backend="jax",
test_with_out=st.just(True),
)
def test_scaled_dot_product_attention(
*,
dtype_q_k_v_mask,
scale,
dropout_p,
is_causal,
training,
test_flags,
backend_fw,
fn_name,
on_device,
):
(dtype, query, key, value, mask) = dtype_q_k_v_mask
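    # an explicit mask takes precedence, so disable causal masking when one is given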
is_causal = is_causal if mask is None else False
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
atol_=1e-02,
rtol_=1e-02,
query=query,
key=key,
value=value,
scale=scale,
mask=mask,
dropout_p=dropout_p,
is_causal=is_causal,
training=training,
)
| ivy/ivy_tests/test_ivy/test_functional/test_nn/test_layers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_nn/test_layers.py",
"repo_id": "ivy",
"token_count": 26563
} | 68 |
import ivy
import numpy as np
import pytest
@pytest.mark.parametrize(("shape", "rank"), [((5, 4, 6), (3, 2, 3))])
def test_n_param_tucker(shape, rank):
tucker_tensor = ivy.random_tucker(shape, rank)
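    # expected parameter count: size of the core plus the sizes of all factor matrices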
true_n_param = ivy.prod(ivy.shape(tucker_tensor[0])) + ivy.sum(
[ivy.prod(ivy.shape(f)) for f in tucker_tensor[1]]
)
n_param = tucker_tensor.n_param
assert np.allclose(n_param, true_n_param)
@pytest.mark.parametrize(("shape", "rank"), [((3, 4, 5), 4)])
def test_tucker_copy(shape, rank):
tucker_tensor = ivy.random_tucker(shape, rank)
core, factors = tucker_tensor
core_normalized, factors_normalized = ivy.TuckerTensor.tucker_normalize(
tucker_tensor.tucker_copy()
)
    # Check that modifying the copied tensor doesn't change the original tensor
assert np.allclose(
ivy.TuckerTensor.tucker_to_tensor((core, factors)),
ivy.TuckerTensor.tucker_to_tensor(tucker_tensor),
)
@pytest.mark.parametrize(("shape", "ranks"), [((5, 4, 6), (3, 2, 3))])
def test_tucker_mode_dot(shape, ranks):
tucker_ten = ivy.random_tucker(shape, ranks, full=False)
full_tensor = ivy.TuckerTensor.tucker_to_tensor(tucker_ten)
# matrix for mode 1
matrix = ivy.random_uniform(shape=(7, shape[1]))
# vec for mode 2
vec = ivy.random_uniform(shape=shape[2])
# Test tucker_mode_dot with matrix
res = ivy.TuckerTensor.tucker_mode_dot(tucker_ten, matrix, mode=1, copy=True)
    # Note that if copy=True is not respected, factors will be changed
    # and the next test will fail
res = ivy.TuckerTensor.tucker_to_tensor(res)
true_res = ivy.mode_dot(full_tensor, matrix, mode=1)
assert np.allclose(true_res, res)
# Check that the data was indeed copied
rec = ivy.TuckerTensor.tucker_to_tensor(tucker_ten)
assert np.allclose(full_tensor, rec)
# Test tucker_mode_dot with vec
res = ivy.TuckerTensor.tucker_mode_dot(tucker_ten, vec, mode=2, copy=True)
res = ivy.TuckerTensor.tucker_to_tensor(res)
true_res = ivy.mode_dot(full_tensor, vec, mode=2)
assert np.allclose(res.shape, true_res.shape)
assert np.allclose(true_res, res)
@pytest.mark.parametrize(("shape", "rank"), [((3, 4, 5), (3, 2, 4))])
def test_tucker_normalize(shape, rank):
tucker_ten = ivy.random_tucker(shape, rank)
core, factors = ivy.TuckerTensor.tucker_normalize(tucker_ten)
for i in range(len(factors)):
norm = ivy.sqrt(ivy.sum(ivy.abs(factors[i]) ** 2, axis=0))
assert np.allclose(norm, ivy.ones(rank[i]))
assert np.allclose(
ivy.TuckerTensor.tucker_to_tensor((core, factors)),
ivy.TuckerTensor.tucker_to_tensor(tucker_ten),
)
@pytest.mark.parametrize(
("X", "ranks", "true_res"),
[
(
[
[[1.0, 13], [4, 16], [7, 19], [10, 22]],
[[2, 14], [5, 17], [8, 20], [11, 23]],
[[3, 15], [6, 18], [9, 21], [12, 24]],
],
[2, 3, 4],
[
[
[390.0, 1518, 2646, 3774],
[1310, 4966, 8622, 12278],
[2230, 8414, 14598, 20782],
],
[
[1524, 5892, 10260, 14628],
[5108, 19204, 33300, 47396],
[8692, 32516, 56340, 80164],
],
],
)
],
)
def test_tucker_to_tensor(X, ranks, true_res):
"""Test for tucker_to_tensor."""
X = ivy.array(X)
U = [
ivy.arange(R * s, dtype=ivy.float32).reshape((R, s))
for (R, s) in zip(ranks, X.shape)
]
true_res = ivy.array(true_res)
res = ivy.TuckerTensor.tucker_to_tensor((X, U))
assert np.allclose(true_res, res)
@pytest.mark.parametrize(("shape", "ranks"), [((4, 3, 5, 2), (2, 2, 3, 4))])
def test_tucker_to_unfolded(shape, ranks):
G = ivy.random_uniform(shape=shape)
U = [ivy.random_uniform(shape=(ranks[i], G.shape[i])) for i in range(4)]
full_tensor = ivy.TuckerTensor.tucker_to_tensor((G, U))
for mode in range(4):
assert np.allclose(
ivy.TuckerTensor.tucker_to_unfolded((G, U), mode),
ivy.unfold(full_tensor, mode),
)
assert np.allclose(
ivy.TuckerTensor.tucker_to_unfolded((G, U), mode),
ivy.dot(
ivy.dot(U[mode], ivy.unfold(G, mode)),
ivy.permute_dims(ivy.kronecker(U, skip_matrix=mode), (1, 0)),
),
)
@pytest.mark.parametrize(("shape", "ranks"), [((4, 3, 5, 2), (2, 2, 3, 4))])
def test_tucker_to_vec(shape, ranks):
G = ivy.random_uniform(shape=shape)
U = [ivy.random_uniform(shape=(ranks[i], G.shape[i])) for i in range(4)]
vec = ivy.reshape(ivy.TuckerTensor.tucker_to_tensor((G, U)), -1)
assert np.allclose(ivy.TuckerTensor.tucker_to_vec((G, U)), vec)
assert np.allclose(
ivy.TuckerTensor.tucker_to_vec((G, U)),
ivy.dot(ivy.kronecker(U), ivy.reshape(G, -1)),
)
@pytest.mark.parametrize("tol", [(0.01)])
def test_validate_tucker_rank(tol):
tensor_shape = tuple(ivy.randint(1, 100, shape=(5,)))
n_param_tensor = ivy.prod(tensor_shape)
# Rounding = floor
rank = ivy.TuckerTensor.validate_tucker_rank(
tensor_shape, rank="same", rounding="floor"
)
n_param = ivy.TuckerTensor.tucker_n_param(tensor_shape, rank)
assert n_param * (1 - tol) <= n_param_tensor
# Rounding = ceil
rank = ivy.TuckerTensor.validate_tucker_rank(
tensor_shape, rank="same", rounding="ceil"
)
n_param = ivy.TuckerTensor.tucker_n_param(tensor_shape, rank)
assert n_param >= n_param_tensor * (1 - tol)
# With fixed modes
fixed_modes = [1, 4]
tensor_shape = [
s**2 if i in fixed_modes else s
for (i, s) in enumerate(ivy.randint(2, 10, shape=(5,)))
]
n_param_tensor = ivy.prod(tensor_shape)
# Floor
rank = ivy.TuckerTensor.validate_tucker_rank(
tensor_shape, rank=0.5, fixed_modes=fixed_modes, rounding="floor"
)
n_param = ivy.TuckerTensor.tucker_n_param(tensor_shape, rank)
for mode in fixed_modes:
assert rank[mode] == tensor_shape[mode]
assert n_param * (1 - tol) <= n_param_tensor * 0.5
# Ceil
fixed_modes = [0, 2]
tensor_shape = [
s**2 if i in fixed_modes else s
for (i, s) in enumerate(ivy.randint(2, 10, shape=(5,)))
]
n_param_tensor = ivy.prod(tensor_shape)
rank = ivy.TuckerTensor.validate_tucker_rank(
tensor_shape, rank=0.5, fixed_modes=fixed_modes, rounding="ceil"
)
n_param = ivy.TuckerTensor.tucker_n_param(tensor_shape, rank)
for mode in fixed_modes:
assert rank[mode] == tensor_shape[mode]
assert n_param >= n_param_tensor * 0.5 * (1 - tol)
# These tests have been adapted from TensorLy
# https://github.com/tensorly/tensorly/blob/main/tensorly/tests/test_tucker_tensor.py
@pytest.mark.parametrize(("true_shape", "true_rank"), [((3, 4, 5), (3, 2, 4))])
def test_validate_tucker_tensor(true_shape, true_rank):
core, factors = ivy.random_tucker(true_shape, true_rank)
# Check shape and rank returned
shape, rank = ivy.TuckerTensor.validate_tucker_tensor((core, factors))
np.testing.assert_equal(
shape,
true_shape,
err_msg=f"Returned incorrect shape (got {shape}, expected {true_shape})",
)
np.testing.assert_equal(
rank,
true_rank,
err_msg=f"Returned incorrect rank (got {rank}, expected {true_rank})",
)
# One of the factors has the wrong rank
factors[0], copy = ivy.random_uniform(shape=((4, 4))), factors[0]
with np.testing.assert_raises(ValueError):
ivy.TuckerTensor.validate_tucker_tensor((core, factors))
# Not enough factors to match core
factors[0] = copy
with np.testing.assert_raises(ValueError):
ivy.TuckerTensor.validate_tucker_tensor((core, factors[1:]))
# Not enough factors
with np.testing.assert_raises(ValueError):
ivy.TuckerTensor.validate_tucker_tensor((core, factors[:1]))
| ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tucker_tensor.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tucker_tensor.py",
"repo_id": "ivy",
"token_count": 3889
} | 69 |
"""Collection of tests for Ivy optimizers."""
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_method
from ivy_tests.test_ivy.test_functional.test_core.test_gradients import (
get_gradient_arguments_with_lr,
)
# adam
@handle_method(
method_tree="Adam._step",
dtype_x_lr=get_gradient_arguments_with_lr(
min_value=1e-05,
max_value=1e08,
num_arrays=2,
float_lr=True,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
beta1_n_beta2_n_epsilon=helpers.list_of_size(
x=helpers.floats(min_value=1e-1, max_value=1),
size=3,
),
inplace=st.booleans(),
stop_gradients=st.booleans(),
test_gradients=st.just(True),
)
def test_adam_optimizer(
dtype_x_lr,
beta1_n_beta2_n_epsilon,
inplace,
stop_gradients,
on_device,
class_name,
method_name,
backend_fw,
ground_truth_backend,
test_gradients,
init_flags,
method_flags,
):
input_dtype, x, lr = dtype_x_lr
beta1, beta2, epsilon = beta1_n_beta2_n_epsilon
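    # differentiate w.r.t. v, passed either positionally (index 0) or as a keyword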
xs_grad_idxs = [[0, 0]] if method_flags.num_positional_args else [[1, "v"]]
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"lr": lr,
"beta1": beta1,
"beta2": beta2,
"epsilon": epsilon,
"inplace": inplace,
"stop_gradients": stop_gradients,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"v": x[0],
"grads": x[1],
},
class_name=class_name,
method_name=method_name,
rtol_=1e-1,
atol_=1e-1,
test_gradients=test_gradients,
xs_grad_idxs=xs_grad_idxs,
on_device=on_device,
)
# AdamW
@handle_method(
method_tree="AdamW._step",
dtype_x_lr=get_gradient_arguments_with_lr(
min_value=1e-05,
max_value=1e08,
num_arrays=2,
float_lr=True,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
beta1_n_beta2_n_epsilon=helpers.list_of_size(
x=helpers.floats(min_value=1e-1, max_value=1),
size=3,
),
weight_decay=helpers.floats(min_value=0, max_value=1e-1),
inplace=st.booleans(),
stop_gradients=st.booleans(),
test_gradients=st.just(True),
)
def test_adamw_optimizer(
dtype_x_lr,
beta1_n_beta2_n_epsilon,
weight_decay,
inplace,
stop_gradients,
on_device,
class_name,
method_name,
backend_fw,
ground_truth_backend,
test_gradients,
init_flags,
method_flags,
):
input_dtype, x, lr = dtype_x_lr
beta1, beta2, epsilon = beta1_n_beta2_n_epsilon
xs_grad_idxs = [[0, 0]] if method_flags.num_positional_args else [[1, "v"]]
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"lr": lr,
"beta1": beta1,
"beta2": beta2,
"epsilon": epsilon,
"weight_decay": weight_decay,
"inplace": inplace,
"stop_gradients": stop_gradients,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"v": x[0],
"grads": x[1],
},
class_name=class_name,
method_name=method_name,
rtol_=1e-1,
atol_=1e-1,
test_gradients=test_gradients,
xs_grad_idxs=xs_grad_idxs,
on_device=on_device,
)
# lamb
@handle_method(
method_tree="LAMB._step",
dtype_x_lr=get_gradient_arguments_with_lr(
min_value=-1e5,
max_value=1e5,
num_arrays=2,
float_lr=True,
),
beta1_n_beta2_n_epsilon_n_lambda=helpers.list_of_size(
x=helpers.floats(
min_value=1e-2,
max_value=1.0,
),
size=4,
),
mtr=st.one_of(
helpers.ints(min_value=1, max_value=10),
st.floats(min_value=1e-2, max_value=10, exclude_min=True),
),
inplace=st.booleans(),
stop_gradients=st.booleans(),
test_gradients=st.just(True),
)
def test_lamb_optimizer(
dtype_x_lr,
beta1_n_beta2_n_epsilon_n_lambda,
mtr,
inplace,
stop_gradients,
on_device,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
test_gradients,
):
input_dtype, x, lr = dtype_x_lr
beta1, beta2, epsilon, decay_lambda = beta1_n_beta2_n_epsilon_n_lambda
xs_grad_idxs = [[0, 0]] if method_flags.num_positional_args else [[1, "v"]]
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"lr": lr,
"beta1": beta1,
"beta2": beta2,
"epsilon": epsilon,
"max_trust_ratio": mtr,
"decay_lambda": decay_lambda,
"inplace": inplace,
"stop_gradients": stop_gradients,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"v": x[0],
"grads": x[1],
},
class_name=class_name,
method_name=method_name,
rtol_=1e-1,
atol_=1e-1,
test_gradients=test_gradients,
xs_grad_idxs=xs_grad_idxs,
on_device=on_device,
)
# lars
@handle_method(
method_tree="LARS._step",
dtype_x_lr=get_gradient_arguments_with_lr(num_arrays=2, float_lr=True),
inplace=st.booleans(),
decay_lambda=helpers.floats(min_value=1e-2, max_value=1.0),
stop_gradients=st.booleans(),
test_gradients=st.just(True),
)
def test_lars_optimizer(
dtype_x_lr,
decay_lambda,
inplace,
stop_gradients,
on_device,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
test_gradients,
):
input_dtype, x, lr = dtype_x_lr
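    # skip gradient testing for bfloat16 inputs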
if "bfloat16" in input_dtype:
test_gradients = False
xs_grad_idxs = [[0, 0]] if method_flags.num_positional_args else [[1, "v"]]
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"lr": lr,
"decay_lambda": decay_lambda,
"inplace": inplace,
"stop_gradients": stop_gradients,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"v": x[0],
"grads": x[1],
},
class_name=class_name,
method_name=method_name,
rtol_=1e-1,
atol_=1e-1,
test_gradients=test_gradients,
xs_grad_idxs=xs_grad_idxs,
on_device=on_device,
)
# sgd
@handle_method(
method_tree="SGD._step",
dtype_x_lr=get_gradient_arguments_with_lr(
min_value=-1e5,
max_value=1e5,
num_arrays=2,
float_lr=True,
),
inplace=st.booleans(),
stop_gradients=st.booleans(),
test_gradients=st.just(True),
)
def test_sgd_optimizer(
dtype_x_lr,
inplace,
stop_gradients,
on_device,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
test_gradients,
):
input_dtype, x, lr = dtype_x_lr
xs_grad_idxs = [[0, 0]] if method_flags.num_positional_args else [[1, "v"]]
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"lr": lr,
"inplace": inplace,
"stop_gradients": stop_gradients,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"v": x[0],
"grads": x[1],
},
class_name=class_name,
method_name=method_name,
rtol_=1e-2,
atol_=1e-2,
test_gradients=test_gradients,
xs_grad_idxs=xs_grad_idxs,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_stateful/test_optimizers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_stateful/test_optimizers.py",
"repo_id": "ivy",
"token_count": 4527
} | 70 |
import os
import sys
from pydriller import Repository
import pickle # noqa
from tqdm import tqdm
import bz2
import _pickle as cPickle
from get_all_tests import get_all_tests
# Shared Map
tests = {}
N = 128
run_iter = int(sys.argv[1]) - 1
test_names = get_all_tests()
# Create a Dictionary of Test Names to Index
tests = {"index_mapping": test_names, "tests_mapping": {}}
for i in range(len(test_names)):
tests["tests_mapping"][test_names[i]] = i
if __name__ == "__main__":
directories = (
[x[0] for x in os.walk("ivy")]
+ [x[0] for x in os.walk("ivy_tests/test_ivy")]
+ ["ivy_tests"]
)
directories_filtered = [
x for x in directories if not (x.endswith("__pycache__") or "hypothesis" in x)
]
directories = set(directories_filtered)
num_tests = len(test_names)
tests_per_run = num_tests // N
start = run_iter * tests_per_run
end = num_tests if run_iter == N - 1 else (run_iter + 1) * tests_per_run
for test_backend in tqdm(test_names[start:end]):
test_name, backend = test_backend.split(",")
command = (
f'docker run -v "$(pwd)":/ivy unifyai/ivy:latest timeout 30m /bin/bash -c "coverage run --source=ivy,' # noqa
f"ivy_tests -m pytest {test_name} --num-examples 5 --backend {backend} --disable-warnings > coverage_output;coverage " # noqa
f'annotate > coverage_output" '
)
os.system(command)
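        # `coverage annotate` writes one ".cover" file per source file, with
        # executed lines prefixed by ">"; record which lines this test covered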
for directory in directories:
for file_name in os.listdir(directory):
if file_name.endswith("cover"):
file_name = f"{directory}/{file_name}"
if file_name not in tests:
tests[file_name] = []
with open(file_name) as f:
for line in f:
tests[file_name].append(set())
with open(file_name) as f:
i = 0
for line in f:
if line[0] == ">":
tests[file_name][i].add(
tests["tests_mapping"][test_backend]
)
i += 1
os.system("find . -name \\*cover -type f -delete")
commit_hash = ""
for commit in Repository(".", order="reverse").traverse_commits():
commit_hash = commit.hash
break
tests["commit"] = commit_hash
with bz2.BZ2File("tests.pbz2", "w") as f:
cPickle.dump(tests, f)
| ivy/scripts/determine_tests/determine_test_coverage.py/0 | {
"file_path": "ivy/scripts/determine_tests/determine_test_coverage.py",
"repo_id": "ivy",
"token_count": 1278
} | 71 |
import sys
from get_all_tests import get_all_tests
run_iter, gpu, tracer = int(sys.argv[1]), sys.argv[2], sys.argv[3]
if gpu == "true":
from setup_priority_tests import main
main()
with open("tests_to_run", "r") as f:
test_names = [line.strip() for line in f.readlines()]
tests_per_run = 8
else:
test_names = get_all_tests()
tests_per_run = 150
if tracer == "true":
tests_per_run = 20
num_tests = len(test_names)
start = run_iter * tests_per_run
end = (run_iter + 1) * tests_per_run
print("Running Tests:")
with open("tests_to_run", "w") as f:
for i in range(start, end):
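        # wrap around so run indices past the end cycle back to the first test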
i = i % num_tests
test = test_names[i]
print(test)
f.write(test + "\n")
| ivy/scripts/setup_tests/cron_tests.py/0 | {
"file_path": "ivy/scripts/setup_tests/cron_tests.py",
"repo_id": "ivy",
"token_count": 322
} | 72 |
#!/bin/bash -e
python3 ivy_tests/array_api_testing/write_array_api_tests_k_flag.py
# shellcheck disable=SC2155
export ARRAY_API_TESTS_K_FLAG=$(cat ivy_tests/array_api_testing/.array_api_tests_k_flag_$1)
if [ "$1" = "torch" ]; then
ARRAY_API_TESTS_K_FLAG="${ARRAY_API_TESTS_K_FLAG} and not (uint16 or uint32 or uint64)"
fi
mkdir -p .hypothesis
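# run the array-api test suite for the chosen backend inside the ivy docker image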
# shellcheck disable=SC2046
docker run --rm --env IVY_BACKEND="$1" --env ARRAY_API_TESTS_MODULE="ivy" --env REDIS_URL="$3" --env REDIS_PASSWD="$4" -v $(pwd):/ivy -v $(pwd)/.hypothesis:/.hypothesis unifyai/ivy:latest python3 -m pytest ivy_tests/array_api_testing/test_array_api/array_api_tests/"$2".py -k "$ARRAY_API_TESTS_K_FLAG" --tb=short -vv
| ivy/scripts/shell/test_array_api.sh/0 | {
"file_path": "ivy/scripts/shell/test_array_api.sh",
"repo_id": "ivy",
"token_count": 302
} | 73 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="JavaScriptSettings">
<option name="languageLevel" value="ES6" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Remote Python 3.10.0 Docker (unifyai/ivy:latest)" project-jdk-type="Python SDK" />
<component name="PyPackaging">
<option name="earlyReleasesAsUpgrades" value="true" />
</component>
</project>
| ivy/.idea/misc.xml/0 | {
"file_path": "ivy/.idea/misc.xml",
"repo_id": "ivy",
"token_count": 144
} | 0 |
## Reformatting Task Checklist
#### IMPORTANT NOTICE 🚨:
The [Ivy Docs](https://unify.ai/docs/ivy/) represent the ground truth for the task descriptions and this checklist should only be used as a supplementary item to aid with the review process.
#### LEGEND 🗺:
- ❌ : Check item is not completed.
- ✅ : Check item is ready for review.
- 🆘 : Stuck/Doubting implementation (PR author should add comments explaining why).
- ⏩ : Check is not applicable to function (skip).
- 🆗 : Check item is already implemented and does not require any edits.
#### CHECKS 📑:
1. - [ ] ❌: Remove all lambda and direct bindings for the backend functions in:
- [ ] ❌: [ivy/functional/backends/jax/{{ .category_name }}.py](https://github.com/unifyai/ivy/tree/main/ivy/functional/backends/jax/{{ .category_name }}.py).
- [ ] ❌: [ivy/functional/backends/numpy/{{ .category_name }}.py](https://github.com/unifyai/ivy/tree/main/ivy/functional/backends/numpy/{{ .category_name }}.py).
- [ ] ❌: [ivy/functional/backends/tensorflow/{{ .category_name }}.py](https://github.com/unifyai/ivy/tree/main/ivy/functional/backends/tensorflow/{{ .category_name }}.py).
- [ ] ❌: [ivy/functional/backends/torch/{{ .category_name }}.py](https://github.com/unifyai/ivy/tree/main/ivy/functional/backends/torch/{{ .category_name }}.py).
2. - [ ] ❌: Implement the following if they don't exist:
1. - [ ] ❌: The `ivy.Array` instance method in [ivy/data_classes/array/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/{{ .category_name }}.py).
2. - [ ] ❌: The `ivy.Array` special method in [ivy/data_classes/array/array.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/array.py).
3. - [ ] ❌: The `ivy.Array` reverse special method in [ivy/data_classes/array/array.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/array.py).
4. - [ ] ❌: The `ivy.Container` static method in [ivy/data_classes/container/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/{{ .category_name }}.py).
5. - [ ] ❌: The `ivy.Container` instance method in [ivy/data_classes/container/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/{{ .category_name }}.py).
6. - [ ] ❌: The `ivy.Container` special method in [ivy/data_classes/container/container.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/container.py).
7. - [ ] ❌: The `ivy.Container` reverse special method in [ivy/data_classes/container/container.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/container.py).
3. - [ ] ❌: Make sure that the aforementioned methods are added into the correct category-specific parent class, such as `ivy.ArrayWithElementwise`, `ivy.ContainerWithManipulation` etc.
4. - [ ] ❌: Correct all of the [Function Arguments and the type hints](https://unify.ai/docs/ivy/overview/deep_dive/function_arguments.html#function-arguments) for every function **and** its _relevant methods_, including those you did not implement yourself.
5. - [ ] ❌: Add the correct [Docstrings](https://unify.ai/docs/ivy/overview/deep_dive/docstrings.html#docstrings) to every function **and** its _relevant methods_, including those you did not implement yourself. The following should be added:
1. - [ ] ❌: <a name="ref1"></a> The function's [Array API standard](https://data-apis.org/array-api/latest/index.html) description in [ivy/functional/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/functional/ivy/{{ .category_name }}.py). If the function is not part of the Array API standard then a description of similar style should be added to the same file.
The following modifications should be made to the description:
- [ ] ❌: Remove type definitions in the `Parameters` and `Returns` sections.
- [ ] ❌: Add `out` to the `Parameters` section if function accepts an `out` argument.
- [ ] ❌: Replace `out` with `ret` in the `Returns` section.
2. - [ ] ❌: Reference to docstring for ivy.function_name ([5.a](#ref1)) for the function description **and** modified `Parameters` and `Returns` sections as described in [the docs](https://unify.ai/docs/ivy/overview/deep_dive/docstrings.html#docstrings) in:
- [ ] ❌: [ivy/array/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/{{ .category_name }}.py).
- [ ] ❌: [ivy/container/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/{{ .category_name }}.py) (in the static and instance method versions).
- [ ] ❌: [ivy/array/array.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/array.py) if the function has a special method ( like `__function_name__` ).
- [ ] ❌: [ivy/array/array.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/array.py) if the function has a reverse special method ( like `__rfunction_name__` ).
- [ ] ❌: [ivy/container/container.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/container.py) if the function has a special method ( like `__function_name__` ).
- [ ] ❌: [ivy/container/container.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/container.py) if the function has a reverse special method ( like `__rfunction_name__` ).
6. - [ ] ❌: Add thorough [Docstring Examples](https://unify.ai/docs/ivy/overview/deep_dive/docstring_examples.html#docstring-examples) for every function **and** its _relevant methods_ and ensure they pass the docstring tests.
**Functional Examples** in [ivy/functional/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/functional/ivy/{{ .category_name }}.py).
1. - [ ] ❌: Cover all possible variants for each of the arguments independently (not combinatorily).
2. - [ ] ❌: Vary the values and input shapes considerably between examples.
3. - [ ] ❌: Start out simple and get more complex with each example.
4. - [ ] ❌: Show an example with:
- [ ] ❌: `out` unused.
- [ ] ❌: `out` used to update a new array y.
- [ ] ❌: `out` used to inplace update the input array x (if x has the same dtype and shape as the return).
5. - [ ] ❌: If broadcasting is relevant for the function, then show examples which highlight this.
**Nestable Function Examples** in [ivy/functional/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/functional/ivy/{{ .category_name }}.py).
Only if the function supports nestable operations.
6. - [ ] ❌: <a name="ref2"></a> Add an example that passes in an `ivy.Container` instance in place of one of the arguments.
7. - [ ] ❌: <a name="ref3"></a> Add an example passes in `ivy.Container` instances for multiple arguments.
**Container Static Method Examples** in [ivy/container/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/{{ .category_name }}.py).
8. - [ ] ❌: The example from point ([6.f](#ref2)) should be replicated, but added to the `ivy.Container` **static method** docstring in with `ivy.<func_name>` replaced with `ivy.Container.static_<func_name>` in the example.
9. - [ ] ❌: The example from point ([6.g](#ref3)) should be replicated, but added to the `ivy.Container` **static method** docstring, with `ivy.<func_name>` replaced with `ivy.Container.static_<func_name>` in the example.
**Array Instance Method Example** in [ivy/array/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/{{ .category_name }}).
10. - [ ] ❌: Call this instance method of the `ivy.Array` class.
**Container Instance Method Example** in [ivy/container/{{ .category_name }}.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/{{ .category_name }}.py).
11. - [ ] ❌: Call this instance method of the `ivy.Container` class.
**Array Operator Examples** in [ivy/array/array.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/array.py).
12. - [ ] ❌: Call the operator on two `ivy.Array` instances.
13. - [ ] ❌: Call the operator with an `ivy.Array` instance on the left and `ivy.Container` on the right.
**Array Reverse Operator Example** in [ivy/array/array.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/array/array.py).
14. - [ ] ❌: Call the operator with a `Number` on the left and an `ivy.Array` instance on the right.
**Container Operator Examples** in [ivy/container/container.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/container.py).
15. - [ ] ❌: Call the operator on two `ivy.Container` instances containing Number instances at the leaves.
16. - [ ] ❌: Call the operator on two `ivy.Container` instances containing `ivy.Array` instances at the leaves.
17. - [ ] ❌: Call the operator with an `ivy.Container` instance on the left and `ivy.Array` on the right.
**Container Reverse Operator Example** in [ivy/container/container.py](https://github.com/unifyai/ivy/blob/main/ivy/data_classes/container/container.py).
             18. - [ ] ❌: Following the example in the [`ivy.Container.__radd__`](https://github.com/unifyai/ivy/blob/e28a3cfd8a4527066d0d92d48a9e849c9f367a39/ivy/container/container.py#L173) docstring, call the operator with a `Number` on the left and an `ivy.Container` instance on the right.
**Tests**
19. - [ ] ❌: Docstring examples tests passing.
20. - [ ] ❌: Lint checks passing.
| ivy/automation_tools/checklists/reformat_checklist.md/0 | {
"file_path": "ivy/automation_tools/checklists/reformat_checklist.md",
"repo_id": "ivy",
"token_count": 3501
} | 1 |
{
"tensorflow": ["tensorflow-cpu", "tensorflow-probability"],
"jax": ["ml-dtypes","jax[cpu]","dm-haiku", "flax", "jaxlib"],
"numpy": ["numpy"],
"paddle": ["paddlepaddle"],
"mxnet": ["mxnet"],
"torch": ["torchvision"]
}
| ivy/docker/requirement_mappings.json/0 | {
"file_path": "ivy/docker/requirement_mappings.json",
"repo_id": "ivy",
"token_count": 102
} | 2 |
Error Handling
==============
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`pycharm thread`: https://discord.com/channels/799879767196958751/1186628916522262629
.. _`docker thread`: https://discord.com/channels/799879767196958751/1186629067966009424
.. _`pre-commit thread`: https://discord.com/channels/799879767196958751/1186629635694399539
.. _`pip packages thread`: https://discord.com/channels/799879767196958751/1186629837515935765
.. _`ivy tests thread`: https://discord.com/channels/799879767196958751/1189907526226034698
This section, "Error Handling" aims to assist you in navigating through some common errors you might encounter while working with the Ivy's Functional API. We'll go through some common errors which you might encounter while working as a contributor or a developer.
#. This is the case where we pass in a dtype to `torch` which is not actually supported by the native torch framework itself.
.. code-block:: python
E RuntimeError: "logaddexp2_cpu" not implemented for 'Half'
E Falsifying example: test_logaddexp2(
E backend_fw='torch',
E on_device='cpu',
E dtype_and_x=(['float16', 'float16'],
E [array([-1.], dtype=float16), array([-1.], dtype=float16)]),
E test_flags=FunctionTestFlags(
E ground_truth_backend='tensorflow',
E num_positional_args=2,
E with_out=False,
E instance_method=False,
E test_gradients=False,
E test_trace=None,
E as_variable=[False],
E native_arrays=[False],
E container=[False],
E ),
E fn_name='logaddexp2',
E )
E
E You can reproduce this example by temporarily adding @reproduce_failure('6.82.4', b'AXicY2BkAAMoBaaR2WAAAACVAAY=') as a decorator on your test case
**Solution:**
Since we are explicitly passing in a `dtype` which is not supported by the torch framework itself, the torch backend fails here. A possible fix is adding the dtype to the unsupported dtypes decorator, which would look something like this.
.. code-block:: python
@with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, backend_version)
and place it above the function definition.
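For instance, in the torch backend this would sit directly above the failing function. The sketch below is illustrative only, and the exact signature in the backend file may differ slightly:

.. code-block:: python

    from typing import Optional

    import torch

    from ivy.func_wrapper import with_unsupported_dtypes
    from . import backend_version


    # float16 is declared unsupported because torch.logaddexp2 is not
    # implemented for half precision on CPU
    @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, backend_version)
    def logaddexp2(
        x1: torch.Tensor,
        x2: torch.Tensor,
        /,
        *,
        out: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        return torch.logaddexp2(x1, x2, out=out)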
#. This is the case where the value from the ground-truth backend (tensorflow) does not match the value from the backend we are testing (jax).
.. code-block:: python
E AssertionError: the results from backend jax and ground truth framework tensorflow do not match
E 0.25830078125!=0.258544921875
E
E
E Falsifying example: test_acosh(
E backend_fw='jax',
E on_device='cpu',
E dtype_and_x=(['float16'], [array(4., dtype=float16)]),
E test_flags=FunctionTestFlags(
E ground_truth_backend='tensorflow',
E num_positional_args=1,
E with_out=False,
E instance_method=False,
E test_gradients=True,
E test_trace=None,
E as_variable=[False],
E native_arrays=[False],
E container=[False],
E ),
E fn_name='acosh',
E )
E
E You can reproduce this example by temporarily adding @reproduce_failure('6.82.4', b'AXicY2BAABYQwQgiAABDAAY=') as a decorator on your test case
**Solution:**
As both results are pretty close to each other in this case, adding an `rtol = 1e-2` and `atol = 1e-2` would fix the failing test here, as shown below.
.. code-block:: python
@handle_test(
fn_tree="functional.ivy.acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
),
)
def test_acosh(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
input_dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
)
#. This is a similar assertion error to the one in point 2, with torch and the ground-truth tensorflow not matching. However, the matrices are quite different here, so there should be an issue in the backend rather than a numerical instability.
.. code-block:: python
E AssertionError: the results from backend torch and ground truth framework tensorflow do not match
E [[1.41421356 1.41421356 1.41421356]
E [1.41421356 1.41421356 1.41421356]
E [1.41421356 inf 1.41421356]]!=[[1.41421356e+000 1.41421356e+000 1.41421356e+000]
E [1.41421356e+000 1.41421356e+000 1.41421356e+000]
E [1.41421356e+000 1.34078079e+154 1.41421356e+000]]
E
E
E Falsifying example: test_abs(
E backend_fw='torch',
E on_device='cpu',
E dtype_and_x=(['complex128'],
E [array([[-1.-1.00000000e+000j, -1.-1.00000000e+000j, -1.-1.00000000e+000j],
E [-1.-1.00000000e+000j, -1.-1.00000000e+000j, -1.-1.00000000e+000j],
E [-1.-1.00000000e+000j, -1.-1.34078079e+154j, -1.-1.00000000e+000j]])]),
E fn_name='abs',
E test_flags=FunctionTestFlags(
E ground_truth_backend='tensorflow',
E num_positional_args=1,
E with_out=False,
E instance_method=False,
E test_gradients=False,
E test_trace=None,
E as_variable=[False],
E native_arrays=[False],
E container=[False],
E ),
E )
E
E You can reproduce this example by temporarily adding @reproduce_failure('6.82.4', b'AXicY2ZkYAIiBiBgZIAAxqHEXsAAB7jUQAAAMtEAzQ==') as a decorator on your test case
**Solution:**
If the test passes for all other backends and fails only for torch, and the result matrices also differ substantially (indicating that this is not a numerical instability), then the issue is with the torch backend. The best approach in this case is to inspect the torch backend implementation of the function, locate the bug, and correct it.
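As an illustrative sketch only (this is not the actual backend code): if the backend were computing the complex absolute value naively, the squared components would overflow ``float64`` for magnitudes around ``1e154``, which matches the ``inf`` seen above. Delegating to ``torch.abs``, which supports complex tensors natively, would avoid the intermediate overflow:

.. code-block:: python

    import torch

    # hypothetical buggy version of the backend's abs: x.real ** 2
    # overflows float64 once |x.real| exceeds roughly 1.34e154
    def buggy_abs(x: torch.Tensor) -> torch.Tensor:
        return torch.sqrt(x.real**2 + x.imag**2)

    # possible fix: torch.abs computes the complex magnitude natively,
    # without explicitly squaring the components
    def fixed_abs(x: torch.Tensor) -> torch.Tensor:
        return torch.abs(x)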
**Note**
This section is specifically targeted towards dealing with the Ivy Functional API and the Ivy Experimental API.
**Round Up**
This should have hopefully given you an understanding of how to deal with common errors while working with the functional API.
If you have any questions, please feel free to reach out on `discord`_ in the `ivy tests thread`_, `pycharm thread`_, `docker thread`_, `pre-commit thread`_, `pip packages thread`_ depending on the question!
| ivy/docs/overview/contributing/error_handling.rst/0 | {
"file_path": "ivy/docs/overview/contributing/error_handling.rst",
"repo_id": "ivy",
"token_count": 3728
} | 3 |
Docstrings
==========
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`spec/API_specification/array_api`: https://github.com/data-apis/array-api/blob/main
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`docstrings thread`: https://discord.com/channels/799879767196958751/1189906836426596412
All functions in the Ivy API at :mod:`ivy/functional/ivy/category_name.py` should have full and thorough docstrings.
In contrast, all backend implementations at :mod:`ivy/functional/backends/backend_name/category_name.py` should not have any docstrings, on account that these are effectively just different instantiations of the functions at :mod:`ivy/functional/ivy/category_name.py`.
In order to explain how docstrings should be written, we will use :func:`ivy.tan` as an example.
Firstly, if the function exists in the `Array API Standard`_, then we start with the corresponding docstring as a template.
These docstrings can be found under `spec/API_specification/array_api`_.
Important: you should open the file in **raw** format.
If you copy directly from the file preview on GitHub before clicking **raw**, then the newlines will **not** be copied over, and the docstring will render incorrectly in the online docs.
The `Array API Standard`_ docstring for :code:`tan` is as follows:
.. parsed-literal::
Calculates an implementation-dependent approximation to the tangent, having domain ``(-infinity, +infinity)`` and codomain ``(-infinity, +infinity)``, for each element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be expressed in radians.
**Special cases**
For floating-point operands,
- If ``x_i`` is ``NaN``, the result is ``NaN``.
- If ``x_i`` is ``+0``, the result is ``+0``.
- If ``x_i`` is ``-0``, the result is ``-0``.
- If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
Parameters
----------
x: array
input array whose elements are expressed in radians. Should have a floating-point data type.
Returns
-------
out: array
an array containing the tangent of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
This is a good starting point.
But we need to make some changes.
Firstly, given that we are using type hints, repeating all of the types also in the docs would be a needless duplication.
Therefore, we simply remove all type info from the docstring like so:
.. code-block:: diff
-x: array
+x
-out: array
+out
The `Array API Standard`_ defines a subset of behaviour that each function must adhere to.
Ivy extends many of these functions with additional behaviour and arguments.
In the case of :func:`ivy.tan`, there is also the argument :code:`out` which needs to be added to the docstring, like so:
.. code-block:: diff
+out
+ optional output array, for writing the result to. It must have a shape that the inputs
+ broadcast to.
Because of this :code:`out` argument in the input, we also need to rename the :code:`out` argument in the return, which is the default name used in the `Array API Standard`_.
We change this to :code:`ret`:
.. code-block:: diff
-out
+ret
Next, we add a section in the docstring which explains that it has been modified from the version available in the
`Array API Standard`_:
.. code-block:: diff
+This function conforms to the `Array API Standard
+<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
+`docstring <https://data-apis.org/array-api/latest/API_specification/generated/array_api.tan.html>`_
+in the standard.
Finally, **if** the function is *nestable*, then we add a simple explanation for this as follows:
.. code-block:: diff
   +Both the description and the type hints above assume an array input for simplicity,
+but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
+instances in place of any of the arguments.
Following these changes, the new docstring is as follows:
.. parsed-literal::
Calculates an implementation-dependent approximation to the tangent, having
domain ``(-infinity, +infinity)`` and codomain ``(-infinity, +infinity)``, for each
element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be
expressed in radians.
**Special cases**
For floating-point operands,
- If ``x_i`` is ``NaN``, the result is ``NaN``.
- If ``x_i`` is ``+0``, the result is ``+0``.
- If ``x_i`` is ``-0``, the result is ``-0``.
- If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
Parameters
----------
x
input array whose elements are expressed in radians. Should have a
floating-point data type.
out
optional output array, for writing the result to. It must have a shape that the inputs
broadcast to.
Returns
-------
ret
an array containing the tangent of each element in ``x``. The return must have a
floating-point data type determined by :ref:`type-promotion`.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/API_specification/generated/array_api.tan.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
If the function that you are writing a docstring for is **not** in the `Array API Standard`_, then you must simply follow this general template as closely as possible, but instead you must use your own judgment when adding descriptions for the overall function, and also for each of its arguments.
**Classes**
The instance methods in :class:`ivy.Array` and :class:`ivy.Container` which directly wrap a function in the functional API do not require thorough docstrings, on account that these instance methods require no explanation beyond that provided in the docstring for the wrapped function.
Therefore, these docstrings should all simply contain the following text:
.. code-block:: python
ivy.<Array|Container> <instance|special|reverse special> method variant of ivy.<func_name>. This method simply wraps the
function, and so the docstring for ivy.<func_name> also applies to this method
with minimal changes.
Parameters
----------
<parameters with their description>
Returns
-------
<return value with its description>
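Applied to :func:`ivy.tan`, for example, the :class:`ivy.Array` instance method docstring would look something like the following sketch (argument descriptions abbreviated):

.. code-block:: python

    def tan(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
        """
        ivy.Array instance method variant of ivy.tan. This method simply wraps the
        function, and so the docstring for ivy.tan also applies to this method
        with minimal changes.

        Parameters
        ----------
        self
            input array whose elements are expressed in radians.
        out
            optional output array, for writing the result to.

        Returns
        -------
        ret
            an array containing the tangent of each element in ``self``.
        """
        return ivy.tan(self._data, out=out)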
The exception to this is :class:`ivy.Container` :code:`special` method docstrings,
which should instead use the following text, as these do not *directly* wrap a function
in Ivy's functional API, but rather wrap the pure operator functions themselves,
which can be called on any types that support the corresponding special methods:
.. parsed-literal::
ivy.Container <special|reverse special> method for the <operator_name> operator,
calling :code:`operator.<operator_name>` for each of the corresponding leaves of
the two containers.
Parameters
----------
<parameters with their description>
Returns
-------
<return value with its description>
Let's take :func:`ivy.add` as an example.
The docstring for `ivy.add <https://github.com/unifyai/ivy/blob/04766790a518ecde380cb6eeb05aa89cf5acdbfd/ivy/functional/ivy/elementwise.py#L191>`_ is thorough, as explained above.
However, the docstrings for `ivy.Array.add <https://github.com/unifyai/ivy/blob/04766790a518ecde380cb6eeb05aa89cf5acdbfd/ivy/array/elementwise.py#L36>`_, `ivy.Container.add <https://github.com/unifyai/ivy/blob/04766790a518ecde380cb6eeb05aa89cf5acdbfd/ivy/container/elementwise.py#L209>`_ all follow the succinct pattern outlined above.
Likewise, the docstrings for the special methods `ivy.Array.__add__ <https://github.com/unifyai/ivy/blob/04766790a518ecde380cb6eeb05aa89cf5acdbfd/ivy/array/array.py#L310>`_, `ivy.Array.__radd__ <https://github.com/unifyai/ivy/blob/04766790a518ecde380cb6eeb05aa89cf5acdbfd/ivy/array/array.py#L359>`_, `ivy.Container.__add__ <https://github.com/unifyai/ivy/blob/04766790a518ecde380cb6eeb05aa89cf5acdbfd/ivy/container/container.py#L106>`_, and `ivy.Container.__radd__ <https://github.com/unifyai/ivy/blob/04766790a518ecde380cb6eeb05aa89cf5acdbfd/ivy/container/container.py#L171>`_, also follow the succinct pattern outlined above.
Note that these docstrings all *also* include examples, which we will cover in the next section.
For all other classes, such as the various layers at :code:`ivy/ivy/stateful/layers`, then we should add full and thorough docstrings for both the **constructor** and also **all methods**.
This is the case even when the class directly wraps a function in the functional API.
For example, the class `ivy.Linear <https://github.com/unifyai/ivy/blob/51c23694c2f51e88caef0f382f200b195f8458b5/ivy/stateful/layers.py#L13>`_ wraps the function `ivy.linear <https://github.com/unifyai/ivy/blob/51c23694c2f51e88caef0f382f200b195f8458b5/ivy/functional/ivy/layers.py#L22>`_, but does so in a stateful manner with the variables stored internally in the instance of the class.
Even though the :class:`ivy.Linear` class wraps :func:`ivy.linear` in the forward pass defined in `ivy.Linear._forward <https://github.com/unifyai/ivy/blob/51c23694c2f51e88caef0f382f200b195f8458b5/ivy/stateful/layers.py#L84>`_, the function signatures of :func:`ivy.linear` and :meth:`ivy.Linear._forward` are still quite distinct, with the former including all trainable variables explicitly, and the latter having these implicit as internal instance attributes of the class.
Therefore, with the exception of the :class:`ivy.Array` and :class:`ivy.Container` methods which directly wrap functions in the functional API, we should always add full and thorough docstrings to all methods of all other classes in Ivy, including cases where these also directly wrap functions in the functional API.
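As a rough, purely illustrative sketch (the method and shape annotations here are hypothetical), a thorough method docstring for such a stateful class might look like:

.. code-block:: python

    def _forward(self, x):
        """Perform the forward pass of this hypothetical layer.

        Parameters
        ----------
        x
            Inputs to process *[batch_shape, in]*.

        Returns
        -------
        ret
            The outputs following the layer's operation *[batch_shape, out]*.
        """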
**Round Up**
These examples should hopefully give you a good understanding of what is required when adding docstrings.
If you have any questions, please feel free to reach out on `discord`_ in the `docstrings thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/TnshJ8swuJM" class="video">
</iframe>
| ivy/docs/overview/deep_dive/docstrings.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/docstrings.rst",
"repo_id": "ivy",
"token_count": 3339
} | 4 |
Design
======
.. _`Deep Dive`: deep_dive.rst
This section is aimed at general users, who would like to learn how to use Ivy, and are less concerned about how it all works under the hood 🔧
The `Deep Dive`_ section is more targeted at potential contributors, and at users who would like to dive deeper into the weeds of the framework🌱, and gain a better understanding of what is actually going on behind the scenes 🎬
If that sounds like you, feel free to check out the `Deep Dive`_ section after you've gone through the higher level overview which is covered in this *design* section!
| So, starting off with our higher level *design* section, Ivy can fulfill two distinct purposes:
|
| 1. enable automatic code conversions between frameworks
| 2. serve as a new ML framework with multi-framework support
|
| The Ivy codebase can then be split into three categories which are labelled (a),
| (b) and (c) below, and can be further split into 8 distinct submodules.
| The eight submodules are the Ivy API, Backend Handler, Backend API, Ivy Tracer,
| Ivy Array, Ivy Container, Ivy Stateful API, and finally the Frontend API.
| All eight fall into one of the three categories as follows:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/design/submodule_dependency_graph.png?raw=true
:align: center
:width: 100%
| (a) `Building Blocks <design/building_blocks.rst>`_
| back-end functional APIs ✅
| Ivy functional API ✅
| Framework Handler ✅
| Ivy Tracer 🚧
|
| (b) `Ivy as a Transpiler <design/ivy_as_a_transpiler.rst>`_
| front-end functional APIs 🚧
|
| (c) `Ivy as a Framework <design/ivy_as_a_framework.rst>`_
| Ivy stateful API ✅
| Ivy Container ✅
| Ivy Array ✅
.. toctree::
:hidden:
:maxdepth: -1
:caption: Design
design/building_blocks.rst
design/ivy_as_a_transpiler.rst
design/ivy_as_a_framework.rst
| ivy/docs/overview/design.rst/0 | {
"file_path": "ivy/docs/overview/design.rst",
"repo_id": "ivy",
"token_count": 576
} | 5 |
``ivy.trace_graph()``
=====================
..
⚠️ **Warning**: The tracer and the transpiler are not publicly available yet, so certain parts of this doc won't work as expected as of now!
When we call an Ivy function, there is always a small performance hit due to added
Python wrapping. This overhead becomes increasingly noticeable when we use large
models with multiple function calls. The Tracer improves the performance of
Ivy by removing the extra wrapping around each function call.
The Tracer takes in any Ivy function, framework-specific (backend) function,
or composition of both, and produces a simplified executable computation graph composed
of functions from the backend functional API only, which results in:
- Simplified code: The Tracer simplifies the code by removing all the wrapping
and functions that don't contribute to the output: print statements, loggers, etc.
- Improved performance: The created graph has no performance overhead from Ivy's
function wrapping; redundant operations from the original function are also
removed, increasing its overall performance.
Tracer API
------------
.. py:function:: ivy.trace_graph(*objs, stateful = None, arg_stateful_idxs = None, kwarg_stateful_idxs = None, to = None, include_generators = True, array_caching = True, return_backend_traced_fn = False, static_argnums = None, static_argnames = None, args = None, kwargs = None,)
Traces a ``Callable``, or a set of them, into an Ivy graph. If ``args`` or ``kwargs`` are specified,
tracing is performed eagerly; otherwise, tracing will happen lazily.
:param objs: Callable(s) to trace and create a graph of.
:type objs: ``Callable``
:param stateful: List of instances to be considered stateful during the graph compilation.
:type stateful: ``Optional[List]``
:param arg_stateful_idxs: Positional arguments to be considered stateful during the graph compilation.
:type arg_stateful_idxs: ``Optional[List]``
:param kwarg_stateful_idxs: Keyword arguments to be considered stateful during the graph compilation.
:type kwarg_stateful_idxs: ``Optional[List]``
:param to: Backend that the graph will be traced to. If not specified, the current backend will be used.
:type to: ``Optional[str]``
:param include_generators: Include array creation/generation functions as part of the graph.
:type include_generators: ``bool``
:param array_caching: Cache the constant arrays that appear as arguments to the functions in the graph.
:type array_caching: ``bool``
:param return_backend_traced_fn: Whether to apply the native compilers, e.g. tf.function, after ivy's compilation.
:type return_backend_traced_fn: ``bool``
:param static_argnums: For jax's jit compilation.
:type static_argnums: ``Optional[Union[int, Iterable[int]]]``
:param static_argnames: For jax's jit compilation.
:type static_argnames: ``Optional[Union[str, Iterable[str]]]``
:param args: Positional arguments for obj.
:type args: ``Optional[Tuple]``
:param kwargs: Keyword arguments for obj.
:type kwargs: ``Optional[dict]``
:rtype: ``Union[Graph, LazyGraph, ivy.Module, ModuleType]``
:return: A ``Graph`` or a non-initialized ``LazyGraph``. If the object is an ``ivy.Module``, the forward pass will be traced and the same module will be returned. If the object is a ``ModuleType``, the function will return a copy of the module with every method lazily traced.
Using the tracer
------------------
To use the ``ivy.trace_graph()`` function, you need to pass a callable object and the corresponding inputs
to the function.
Let's start with a simple function:
.. code-block:: python
import ivy
ivy.set_backend("torch")
def fn(x, y):
z = x**y
print(z)
k = x * y
j = ivy.concat([x, z, y])
sum_j = ivy.sum(j)
return z
x = ivy.array([1, 2, 3])
y = ivy.array([2, 3, 4])
# Trace the function
traced_fn = ivy.trace_graph(fn, args=(x, y))
In this case, the created graph would be:
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/compiler/figure1.png
From the graph, we can observe that:
1. As ``x`` and ``y`` are the only variables used when calculating the returned value ``z``,
the non-contributing variable ``k`` was not included in the graph. Function calls that
don't contribute to the output, like the ``print`` function, were also excluded.
2. As we set the backend to ``torch`` during the compilation process, the traced
functions are torch functions, and the input and output types are torch tensors.
3. The tensor shape in the graph only indicates the shape of the inputs the graph was
traced with. The tracer doesn't impose additional restrictions on the shape or
datatype of the input array(s).
.. code-block:: python
# Original set of inputs
out = traced_fn(x, y)
# Inputs of different shape
a = ivy.array([[1., 2.]])
b = ivy.array([[2., 3.]])
# New set of inputs
out = traced_fn(a, b)
Eager vs lazy compilation
~~~~~~~~~~~~~~~~~~~~~~~~~
The Tracer runs the original function under the hood and tracks its computation
to create the graph. The **eager compilation** method traces the graph in the
corresponding function call with the specified inputs before we use the traced
function.
Instead of compiling functions before using them, Ivy also allows you to trace the
function dynamically. This can be done by passing only the function to the
trace method and not including the function arguments. In this case, the output will be a
``LazyGraph`` instead of a ``Graph`` instance. When this ``LazyGraph`` object is first invoked with
function arguments, it traces the function and returns the output of the traced
function. Once the graph has been initialized, calls to the ``LazyGraph`` object will
use the traced function to compute the outputs directly.
.. code-block:: python
# Trace the function eagerly (compilation happens here)
eager_graph = ivy.trace_graph(fn, args=(x, y))
# Trace the function lazily (compilation does not happen here)
lazy_graph = ivy.trace_graph(fn)
# Trace and return the output
out = lazy_graph(x, y)
To sum up, lazy compilation enables you to delay the compilation process until you have
the necessary inputs during execution. This is particularly useful in cases like
compiling libraries, where it’s not feasible to provide valid arguments for every
function call.
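For instance, since ``ivy.trace_graph`` also accepts a ``ModuleType`` (as noted in the API section above), an entire library can be traced lazily. The module name below is a placeholder for any framework-specific library:

.. code-block:: python

    import ivy
    import some_framework_library  # placeholder for any framework-specific library

    ivy.set_backend("torch")

    # Returns a copy of the module with every function lazily traced; each
    # function is then traced on its first call with valid arguments.
    lazy_lib = ivy.trace_graph(some_framework_library)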
Now let's look at additional functionalities that you can find in the
tracer.
Array caching
~~~~~~~~~~~~~
The tracer is able to cache constant arrays and their operations through the
``array_caching`` flag, reducing computation time after compilation.
.. code-block:: python
import ivy
ivy.set_backend("torch")
def fn(x):
b = ivy.array([2])
a = ivy.array([2])
z = x ** (a + b)
return z
    x = ivy.array([1.])  # example input, not shown in the original snippet
    comp_func = ivy.trace_graph(fn, args=(x,))
When calling ``ivy.trace_graph()``, the ``array_caching`` argument is set to ``True`` by
default, which returns the following graph.
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/compiler/figure2.png
This shows that by caching the constant operation in the graph, a simpler graph can be
obtained. However, if desired, this argument can be set to ``False``, which results in the
graph below. This ultimately results in a trade-off between time and memory, as
cached results need to be stored in memory but if they are not cached these operations
need to be performed.
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/compiler/figure3.png
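To see this trade-off in practice, you can trace the same function with caching enabled and disabled and time both variants. The snippet below is only a sketch; the actual numbers depend on your backend and hardware, and it reuses ``fn`` and ``x`` from the example above.

.. code-block:: python

    import time

    cached_fn = ivy.trace_graph(fn, array_caching=True, args=(x,))
    uncached_fn = ivy.trace_graph(fn, array_caching=False, args=(x,))

    # Time repeated calls to each traced graph on the same input.
    for name, graph in [("cached", cached_fn), ("uncached", uncached_fn)]:
        start = time.perf_counter()
        for _ in range(1000):
            graph(x)
        print(name, time.perf_counter() - start)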
Generators
~~~~~~~~~~
By using the ``include_generators`` argument, you can choose whether generator functions
are included as nodes or "baked" into the graph.
.. code-block:: python
    import ivy
    import torch

    ivy.set_backend("torch")

    def fn(x):
        a = torch.randint(0, 100, size=[1])
        z = x ** a
        return z + torch.rand([1])

    x = torch.tensor([1.])  # example input
    comp_func = ivy.trace_graph(fn, include_generators=True, args=(x,))
Returns:
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/compiler/figure4.png
And instead,
.. code-block:: python
    import ivy
    import torch

    ivy.set_backend("torch")

    def fn(x):
        a = torch.randint(0, 100, size=[1])
        z = x * a
        return z + torch.rand([1])

    x = torch.tensor([1.])  # example input
    comp_func = ivy.trace_graph(fn, include_generators=False, args=(x,))
Returns:
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/compiler/figure5.png
Stateful
~~~~~~~~
Finally, you can also track ``__setattr__`` and ``__getattr__`` methods of
arbitrary classes using the ``stateful`` parameters.
.. code-block:: python
    import ivy
    import torch

    ivy.set_backend("torch")
def fn(cont, x):
cont.new_attribute = x
return x + 1
x = torch.tensor([0])
cont = ivy.Container(x=x)
args = (cont.cont_deep_copy(), x)
comp_func = ivy.trace_graph(fn, arg_stateful_idxs=[[0]], args=args)
.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/compiler/figure6.png
Sharp bits
----------
As some parts of the Tracer are still under development, there are some sharp
bits to take into account when using it. All of these points are WIP, so they'll be
removed soon!
1. **Dynamic control flow**: The created graph is built using function tracing at the
moment, so dynamic control flow such as conditional branches or conditional loops
will not be registered correctly. As an example, if there is a while loop in your
code that depends on a changing value, the number of iterations in the final graph
will be the same as the number of iterations performed with the input passed to the
trace function.
2. **Non-framework-specific code**: As the tracer traces the function using the
functional API of the underlying framework, any piece of code inside the model that
is not from said framework will not be correctly registered. This includes code from
other frameworks (such as NumPy statements inside a torch model) and Python
statements such as ``len()``.
3. **Incorrectly cached parts of the graph**: There are certain cases where tracing
can succeed but cache some parts of the graph which shouldn't really be cached.
To check this, it's recommended to trace with a noise array of the same shape and
then check whether the original function and the traced graph produce the same
output for another input, as sketched below. If you find that the graph is not right,
feel free to open an `issue <https://github.com/unifyai/ivy/issues>`_ with a minimal
example and we'll look into it!
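A minimal sanity check along the lines of point 3 (which can also surface the control-flow issue from point 1) might look like the following sketch, where ``fn`` and ``x`` are placeholders for your own function and a representative input:

.. code-block:: python

    import ivy

    # Trace with a noise input of the same shape as the real input.
    noise = ivy.random_normal(shape=x.shape)
    traced_fn = ivy.trace_graph(fn, args=(noise,))

    # Verify that the original function and the traced graph still agree
    # on a fresh input of the same shape.
    test_input = ivy.random_normal(shape=x.shape)
    assert ivy.allclose(fn(test_input), traced_fn(test_input))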
Examples
--------
Below, we trace a ResNet50 model from
`Hugging Face <https://huggingface.co/microsoft/resnet-50>`_ and use it to classify the
breed of a cat.
.. code-block:: python
import ivy
from transformers import AutoImageProcessor, ResNetForImageClassification
from datasets import load_dataset
# Set backend to torch
ivy.set_backend("torch")
# Download the input image
dataset = load_dataset("huggingface/cats-image")
image = dataset["test"]["image"][0]
# Setting the model
image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
# Preprocessing the input image
inputs = image_processor(image, return_tensors="pt")
Normally, we would then feed these inputs to the model itself without tracing it:
.. code-block:: python
    import torch

    # Normal flow using pytorch
    with torch.no_grad():
        logits = model(**inputs).logits
With ivy, you can trace your model to a computation graph for increased performance.
.. code-block:: python
    # Tracing the model; ``inputs`` is dict-like, so it is passed via kwargs
    traced_graph = ivy.trace_graph(model, kwargs=dict(inputs))
# Using the traced function
logits = traced_graph(**inputs).logits
Finally, let's compute the final output of our traced graph.
.. code-block:: python
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
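To gauge the speedup from tracing, you can time the traced graph against the original model. This is only a sketch; the measured gain will vary with hardware, backend, and model:

.. code-block:: python

    import time

    def bench(f, n=10):
        # Average wall-clock time over n calls.
        start = time.perf_counter()
        for _ in range(n):
            f()
        return (time.perf_counter() - start) / n

    print("original:", bench(lambda: model(**inputs).logits))
    print("traced:  ", bench(lambda: traced_graph(**inputs).logits))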
| ivy/docs/overview/one_liners/trace.rst/0 | {
"file_path": "ivy/docs/overview/one_liners/trace.rst",
"repo_id": "ivy",
"token_count": 3750
} | 6 |
discussion_channel_map = {
"ivy.data_classes.array.array": ["933380487353872454"],
"ivy.data_classes.container.container": ["982738042886422598"],
"ivy.functional.ivy.activations": ["1000043490329251890"],
"ivy.functional.ivy.compilation": ["1000043526849056808"],
"ivy.functional.ivy.constants": ["1000043627961135224"],
"ivy.functional.ivy.creation": ["1000043690254946374"],
"ivy.functional.ivy.data_type": ["1000043749088436315"],
"ivy.functional.ivy.device": ["1000043775021826229"],
"ivy.functional.ivy.elementwise": ["1000043825085026394"],
"ivy.functional.ivy.experimental": ["1028272402624434196"],
"ivy.functional.ivy.extensions": ["1028272402624434196"],
"ivy.functional.ivy.general": ["1000043859973247006"],
"ivy.functional.ivy.gradients": ["1000043921633722509"],
"ivy.functional.ivy.layers": ["1000043967989162005"],
"ivy.functional.ivy.linear_algebra": ["1000044022942933112"],
"ivy.functional.ivy.losses": ["1000044049333485648"],
"ivy.functional.ivy.manipulation": ["1000044082489466951"],
"ivy.functional.ivy.meta": ["1000044106959044659"],
"ivy.functional.ivy.nest": ["1000044136000393326"],
"ivy.functional.ivy.norms": ["1000044163070447626"],
"ivy.functional.ivy.random": ["1000044191658815569"],
"ivy.functional.ivy.searching": ["1000044227247484980"],
"ivy.functional.ivy.set": ["1000044247606644786"],
"ivy.functional.ivy.sorting": ["1000044274148184084"],
"ivy.functional.ivy.statistical": ["1000044336479731872"],
"ivy.functional.ivy.utility": ["1000044369044312164"],
"ivy.stateful.activations": ["1000043360297439272"],
"ivy.stateful.converters": ["1000043009758474310"],
"ivy.stateful.initializers": ["1000043132706115654"],
"ivy.stateful.layers": ["1000043206840426686"],
"ivy.stateful.module": ["1000043315267387502"],
"ivy.stateful.norms": ["1000043235802107936"],
"ivy.stateful.optimizers": ["1000043277870964747"],
"ivy.stateful.sequential": ["1000043078381473792"],
}
# Only generate docs for index.rst
# This works around a bug where autosummary generated docs for the
# code-block examples of autosummary
autosummary_generate = ["index.rst"]
skippable_method_attributes = [{"__qualname__": "_wrap_function.<locals>.new_function"}]
autosectionlabel_prefix_document = True
# Retrieve html_theme_options from docs/conf.py
from docs.conf import html_theme_options
html_theme_options["switcher"]["json_url"] = "https://unify.ai/docs/versions/ivy.json"
html_sidebars = {"**": ["custom-toc-tree"]}
repo_name = "ivy"
# Retrieve demos specific configuration
from docs.demos.demos_conf import * # noqa
| ivy/docs/partial_conf.py/0 | {
"file_path": "ivy/docs/partial_conf.py",
"repo_id": "ivy",
"token_count": 1054
} | 7 |
# global
import abc
from typing import Optional, Union, Literal
# local
import ivy
# noinspection PyUnresolvedReferences
class _ArrayWithElementwise(abc.ABC):
def abs(
self: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array: # noqa
"""ivy.Array instance method variant of ivy.abs. This method simply
wraps the function, and so the docstring for ivy.abs also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the absolute value of each element in ``self``. The
returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([2.6, -6.6, 1.6, -0])
>>> y = x.abs()
>>> print(y)
ivy.array([ 2.6, 6.6, 1.6, 0.])
"""
return ivy.abs(self, out=out)
def acosh(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.acosh. This method simply
wraps the function, and so the docstring for ivy.acosh also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements each represent the area of a hyperbolic sector.
Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the inverse hyperbolic cosine
of each element in ``self``.
The returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([2., 10.0, 1.0])
>>> y = x.acosh()
>>> print(y)
ivy.array([1.32, 2.99, 0. ])
"""
return ivy.acosh(self._data, out=out)
def acos(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.acos. This method simply
wraps the function, and so the docstring for ivy.acos also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the inverse cosine of each element in ``self``.
The returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([1.0, 0.0, -0.9])
>>> y = x.acos()
>>> print(y)
ivy.array([0. , 1.57, 2.69])
"""
return ivy.acos(self._data, out=out)
def add(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.add. This method simply
wraps the function, and so the docstring for ivy.add also applies to
this method with minimal changes.
Parameters
----------
self
first input array. Should have a numeric data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
alpha
optional scalar multiplier for ``x2``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise sums. The returned array must have a
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = x.add(y)
>>> print(z)
ivy.array([5, 7, 9])
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = x.add(y, alpha=2)
>>> print(z)
ivy.array([9, 12, 15])
"""
return ivy.add(self._data, x2, alpha=alpha, out=out)
def asin(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.asin. This method simply
wraps the function, and so the docstring for ivy.asin also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the inverse sine of each element in ``self``. The
returned array must have the same data type as ``self``.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([-1., 1., 4., 0.8])
>>> y = x.asin()
>>> print(y)
ivy.array([-1.57, 1.57, nan, 0.927])
>>> x = ivy.array([-3., -0.9, 1.5, 2.8])
>>> y = ivy.zeros(4)
>>> x.asin(out=y)
>>> print(y)
ivy.array([nan, -1.12, nan, nan])
"""
return ivy.asin(self._data, out=out)
def asinh(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.asinh. This method simply
wraps the function, and so the docstring for ivy.asinh also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements each represent the area of a hyperbolic sector.
Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the inverse hyperbolic sine of each element in ``self``.
The returned array must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([-1., 0., 3.])
>>> y = x.asinh()
>>> print(y)
ivy.array([-0.881, 0. , 1.82 ])
"""
return ivy.asinh(self._data, out=out)
def atan(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.atan. This method simply
wraps the function, and so the docstring for ivy.atan also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the inverse tangent of each element in ``self``. The
returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([1.0, 0.5, -0.5])
>>> y = x.atan()
>>> print(y)
ivy.array([ 0.785, 0.464, -0.464])
"""
return ivy.atan(self._data, out=out)
def atan2(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.atan2. This method simply
wraps the function, and so the docstring for ivy.atan2 also applies to
this method with minimal changes.
Parameters
----------
self
first input array corresponding to the y-coordinates.
Should have a real-valued floating-point data type.
x2
second input array corresponding to the x-coordinates.
Must be compatible with ``self``(see :ref:`broadcasting`).
Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the inverse tangent of the quotient ``self/x2``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1.0, 0.5, 0.0, -0.5, 0.0])
>>> y = ivy.array([1.0, 2.0, -1.5, 0, 1.0])
>>> z = x.atan2(y)
>>> print(z)
ivy.array([ 0.785, 0.245, 3.14 , -1.57 , 0. ])
>>> x = ivy.array([1.0, 2.0])
>>> y = ivy.array([-2.0, 3.0])
>>> z = ivy.zeros(2)
>>> x.atan2(y, out=z)
>>> print(z)
ivy.array([2.68 , 0.588])
>>> nan = float("nan")
>>> x = ivy.array([nan, 1.0, 1.0, -1.0, -1.0])
>>> y = ivy.array([1.0, +0, -0, +0, -0])
>>> x.atan2(y)
ivy.array([ nan, 1.57, 1.57, -1.57, -1.57])
>>> x = ivy.array([+0, +0, +0, +0, -0, -0, -0, -0])
>>> y = ivy.array([1.0, +0, -0, -1.0, 1.0, +0, -0, -1.0])
>>> x.atan2(y)
ivy.array([0. , 0. , 0. , 3.14, 0. , 0. , 0. , 3.14])
>>> y.atan2(x)
ivy.array([ 1.57, 0. , 0. , -1.57, 1.57, 0. , 0. , -1.57])
>>> inf = float("infinity")
>>> x = ivy.array([inf, -inf, inf, inf, -inf, -inf])
>>> y = ivy.array([1.0, 1.0, inf, -inf, inf, -inf])
>>> z = x.atan2(y)
>>> print(z)
ivy.array([ 1.57 , -1.57 , 0.785, 2.36 , -0.785, -2.36 ])
>>> x = ivy.array([2.5, -1.75, 3.2, 0, -1.0])
>>> y = ivy.array([-3.5, 2, 0, 0, 5])
>>> z = x.atan2(y)
>>> print(z)
ivy.array([ 2.52 , -0.719, 1.57 , 0. , -0.197])
>>> x = ivy.array([[1.1, 2.2, 3.3], [-4.4, -5.5, -6.6]])
>>> y = x.atan2(x)
>>> print(y)
ivy.array([[ 0.785, 0.785, 0.785],
[-2.36 , -2.36 , -2.36 ]])
"""
return ivy.atan2(self._data, x2, out=out)
def atanh(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.atanh. This method simply
wraps the function, and so the docstring for ivy.atanh also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements each represent the area of a hyperbolic sector.
Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the inverse hyperbolic tangent of each element
in ``self``. The returned array must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([0.0, 0.5, -0.9])
>>> y = x.atanh()
>>> print(y)
ivy.array([ 0. , 0.549, -1.47 ])
"""
return ivy.atanh(self._data, out=out)
def bitwise_and(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.bitwise_and. This method
simply wraps the function, and so the docstring for ivy.bitwise_and
also applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have an integer or boolean data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have an integer or boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([True, False])
>>> y = ivy.array([True, True])
>>> x.bitwise_and(y, out=y)
>>> print(y)
ivy.array([ True, False])
>>> x = ivy.array([[7],[8],[9]])
>>> y = ivy.native_array([[10],[11],[12]])
>>> z = x.bitwise_and(y)
>>> print(z)
ivy.array([[2],[8],[8]])
"""
return ivy.bitwise_and(self._data, x2, out=out)
def bitwise_left_shift(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.bitwise_left_shift. This
method simply wraps the function, and so the docstring for
ivy.bitwise_left_shift also applies to this method with minimal
changes.
Parameters
----------
self
first input array. Should have an integer or boolean data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
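Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([2, 2, 2])
>>> z = x.bitwise_left_shift(y)
>>> print(z)
ivy.array([4, 8, 12])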
"""
return ivy.bitwise_left_shift(self._data, x2, out=out)
def bitwise_invert(
self: ivy.Array, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.bitwise_invert. This method
simply wraps the function, and so the docstring for ivy.bitwise_invert
also applies to this method with minimal changes.
Parameters
----------
self
input array. Should have an integer or boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([1, 6, 9])
>>> y = x.bitwise_invert()
>>> print(y)
ivy.array([-2, -7, -10])
>>> x = ivy.array([False, True])
>>> y = x.bitwise_invert()
>>> print(y)
ivy.array([True, False])
"""
return ivy.bitwise_invert(self._data, out=out)
def bitwise_or(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.bitwise_or. This method
simply wraps the function, and so the docstring for ivy.bitwise_or also
applies to this method with minimal changes.
Parameters
----------
self
input array. Should have an integer or boolean data type.
x2
second input array. Must be compatible with ``self``
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = x.bitwise_or(y)
>>> print(z)
ivy.array([5, 7, 7])
"""
return ivy.bitwise_or(self._data, x2, out=out)
def bitwise_right_shift(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.bitwise_right_shift. This
method simply wraps the function, and so the docstring for
ivy.bitwise_right_shift also applies to this method with minimal
changes.
Parameters
----------
self
first input array. Should have an integer or boolean data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have an integer or boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> a = ivy.array([[2, 3, 4], [5, 10, 64]])
>>> b = ivy.array([0, 1, 2])
>>> y = a.bitwise_right_shift(b)
>>> print(y)
ivy.array([[ 2, 1, 1],
[ 5, 5, 16]])
"""
return ivy.bitwise_right_shift(self._data, x2, out=out)
def bitwise_xor(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.bitwise_xor. This method
simply wraps the function, and so the docstring for ivy.bitwise_xor
also applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have an integer or boolean data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> a = ivy.array([[89, 51, 32], [14, 18, 19]])
>>> b = ivy.array([[[19, 26, 27], [22, 23, 20]]])
>>> y = a.bitwise_xor(b)
>>> print(y)
ivy.array([[[74,41,59],[24,5,7]]])
"""
return ivy.bitwise_xor(self._data, x2, out=out)
def ceil(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.ceil. This method simply
wraps the function, and so the docstring for ivy.ceil also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the rounded result for each element in ``self``. The
returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([5.5, -2.5, 1.5, -0])
>>> y = x.ceil()
>>> print(y)
ivy.array([ 6., -2., 2., 0.])
"""
return ivy.ceil(self._data, out=out)
def cos(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.cos. This method simply
wraps the function, and so the docstring for ivy.cos also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements are each expressed in radians. Should have a
floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the cosine of each element in ``self``. The returned
array must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1., 0., 2.,])
>>> y = x.cos()
>>> print(y)
ivy.array([0.54, 1., -0.416])
>>> x = ivy.array([-3., 0., 3.])
>>> y = ivy.zeros(3)
>>> x.cos(out=y)
>>> print(y)
ivy.array([-0.99, 1. , -0.99])
>>> x = ivy.array([[0., 1.,], [2., 3.]])
>>> y = x.cos()
>>> print(y)
ivy.array([[1., 0.540], [-0.416, -0.990]])
"""
return ivy.cos(self._data, out=out)
def cosh(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.cosh. This method simply
wraps the function, and so the docstring for ivy.cosh also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements each represent a hyperbolic angle.
Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the hyperbolic cosine of each element in ``self``.
The returned array must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> print(x.cosh())
ivy.array([1.54, 3.76, 10.1])
>>> x = ivy.array([0.23, 3., -1.2])
>>> y = ivy.zeros(3)
>>> print(x.cosh(out=y))
ivy.array([1.03, 10.1, 1.81])
"""
return ivy.cosh(self._data, out=out)
def divide(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.divide. This method simply
wraps the function, and so the docstring for ivy.divide also applies to
this method with minimal changes.
Parameters
----------
self
dividend input array. Should have a real-valued data type.
x2
divisor input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([2., 7., 9.])
>>> x2 = ivy.array([2., 2., 2.])
>>> y = x1.divide(x2)
>>> print(y)
ivy.array([1., 3.5, 4.5])
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([2., 7., 9.])
>>> x2 = ivy.native_array([2., 2., 2.])
>>> y = x1.divide(x2)
>>> print(y)
ivy.array([1., 3.5, 4.5])
"""
return ivy.divide(self._data, x2, out=out)
def equal(
self: ivy.Array,
x2: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.equal. This method simply
wraps the function, and so the docstring for ivy.equal also applies to
this method with minimal changes.
Parameters
----------
self
first input array. May have any data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
May have any data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([2., 7., 9.])
>>> x2 = ivy.array([1., 7., 9.])
>>> y = x1.equal(x2)
>>> print(y)
ivy.array([False, True, True])
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([2.5, 7.3, 9.375])
>>> x2 = ivy.native_array([2.5, 2.9, 9.375])
>>> y = x1.equal(x2)
>>> print(y)
ivy.array([True, False, True])
With mixed :class:`ivy.Array` and `float` inputs:
>>> x1 = ivy.array([2.5, 7.3, 9.375])
>>> x2 = 7.3
>>> y = x1.equal(x2)
>>> print(y)
ivy.array([False, True, False])
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.array([3., 1., 0.9])
>>> x2 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> y = x1.equal(x2)
>>> print(y)
{
a: ivy.array([False, False, False]),
b: ivy.array([True, True, True])
}
"""
return ivy.equal(self._data, x2, out=out)
def exp(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.exp. This method simply
wraps the function, and so the docstring for ivy.exp also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated exponential function result for
each element in ``self``. The returned array must have a floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> print(x.exp())
ivy.array([ 2.71828198, 7.38905573, 20.08553696])
"""
return ivy.exp(self._data, out=out)
def expm1(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.expm1. This method simply
wraps the function, and so the docstring for ivy.expm1 also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
out
optional output array, for writing the result to. It must have
a shape that the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated result for each element in ``x``.
The returned array must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([5.5, -2.5, 1.5, -0])
>>> y = x.expm1()
>>> print(y)
ivy.array([244. , -0.918, 3.48 , 0. ])
>>> y = ivy.array([0., 0.])
>>> x = ivy.array([5., 0.])
>>> _ = x.expm1(out=y)
>>> print(y)
ivy.array([147., 0.])
"""
return ivy.expm1(self._data, out=out)
def floor(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.floor. This method simply
wraps the function, and so the docstring for ivy.floor also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the rounded result for each element in ``self``. The
returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([5.5, -2.5, 1.5, -0])
>>> y = x.floor()
>>> print(y)
ivy.array([ 5., -3., 1., 0.])
"""
return ivy.floor(self._data, out=out)
def floor_divide(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.floor_divide. This method
simply wraps the function, and so the docstring for ivy.floor_divide
also applies to this method with minimal changes.
Parameters
----------
self
dividend input array. Should have a real-valued data type.
x2
divisor input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([13., 7., 8.])
>>> x2 = ivy.array([3., 2., 7.])
>>> y = x1.floor_divide(x2)
>>> print(y)
ivy.array([4., 3., 1.])
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([13., 7., 8.])
>>> x2 = ivy.native_array([3., 2., 7.])
>>> y = x1.floor_divide(x2)
>>> print(y)
ivy.array([4., 3., 1.])
"""
return ivy.floor_divide(self._data, x2, out=out)
def fmin(
self: ivy.Array,
x2: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.fmin. This method simply
wraps the function, and so the docstring for ivy.fmin also applies to
this method with minimal changes.
Parameters
----------
self
First input array.
x2
Second input array
out
optional output array, for writing the result to.
Returns
-------
ret
Array with element-wise minimums.
Examples
--------
>>> x1 = ivy.array([2, 3, 4])
>>> x2 = ivy.array([1, 5, 2])
>>> x1.fmin(x2)
ivy.array([1, 3, 2])
>>> x1 = ivy.array([ivy.nan, 0, ivy.nan])
>>> x2 = ivy.array([0, ivy.nan, ivy.nan])
>>> x1.fmin(x2)
ivy.array([ 0., 0., nan])
"""
return ivy.fmin(self._data, x2, out=out)
def greater(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.greater. This method simply
wraps the function, and so the docstring for ivy.greater also applies
to this method with minimal changes.
Parameters
----------
self
first input array. Should have a real-valued data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array must
have a data type of ``bool``.
Examples
--------
>>> x1 = ivy.array([2., 5., 15.])
>>> x2 = ivy.array([3., 2., 4.])
>>> y = x1.greater(x2)
>>> print(y)
ivy.array([False, True, True])
"""
return ivy.greater(self._data, x2, out=out)
def greater_equal(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.greater_equal. This method
simply wraps the function, and so the docstring for ivy.greater_equal
also applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a real-valued data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = x.greater_equal(y)
>>> print(z)
ivy.array([False,False,False])
"""
return ivy.greater_equal(self._data, x2, out=out)
def isfinite(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.isfinite. This method
simply wraps the function, and so the docstring for ivy.isfinite also
applies to this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing test results. An element ``out_i`` is ``True``
if ``self_i`` is finite and ``False`` otherwise.
The returned array must have a data type of ``bool``.
Examples
--------
>>> x = ivy.array([0, ivy.nan, -ivy.inf, float('inf')])
>>> y = x.isfinite()
>>> print(y)
ivy.array([ True, False, False, False])
"""
return ivy.isfinite(self._data, out=out)
def isinf(
self: ivy.Array,
*,
detect_positive: bool = True,
detect_negative: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.isinf. This method simply
wraps the function, and so the docstring for ivy.isinf also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued data type.
detect_positive
if ``True``, positive infinity is detected.
detect_negative
if ``True``, negative infinity is detected.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing test results. An element ``out_i`` is ``True``
if ``self_i`` is either positive or negative infinity and ``False``
otherwise. The returned array must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([1, 2, 3])
>>> x.isinf()
ivy.array([False, False, False])
>>> x = ivy.array([[1.1, 2.3, -3.6]])
>>> x.isinf()
ivy.array([[False, False, False]])
>>> x = ivy.array([[[1.1], [float('inf')], [-6.3]]])
>>> x.isinf()
ivy.array([[[False],[True],[False]]])
>>> x = ivy.array([[-float('inf'), float('inf'), 0.0]])
>>> x.isinf()
ivy.array([[ True, True, False]])
>>> x = ivy.zeros((3, 3))
>>> x.isinf()
ivy.array([[False, False, False],
[False, False, False],
[False, False, False]])
"""
return ivy.isinf(
self._data,
detect_positive=detect_positive,
detect_negative=detect_negative,
out=out,
)
def isnan(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.isnan. This method simply
wraps the function, and so the docstring for ivy.isnan also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing test results. An element ``out_i`` is ``True``
if ``self_i`` is ``NaN`` and ``False`` otherwise.
The returned array should have a data type of ``bool``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([1, 2, 3])
>>> x.isnan()
ivy.array([False, False, False])
>>> x = ivy.array([[1.1, 2.3, -3.6]])
>>> x.isnan()
ivy.array([[False, False, False]])
>>> x = ivy.array([[[1.1], [float('inf')], [-6.3]]])
>>> x.isnan()
ivy.array([[[False],
[False],
[False]]])
>>> x = ivy.array([[-float('nan'), float('nan'), 0.0]])
>>> x.isnan()
ivy.array([[ True, True, False]])
>>> x = ivy.array([[-float('nan'), float('inf'), float('nan'), 0.0]])
>>> x.isnan()
ivy.array([[ True, False, True, False]])
>>> x = ivy.zeros((3, 3))
>>> x.isnan()
ivy.array([[False, False, False],
[False, False, False],
[False, False, False]])
"""
return ivy.isnan(self._data, out=out)
def less(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.less. This method simply
wraps the function, and so the docstring for ivy.less also applies to
this method with minimal changes.
Parameters
----------
self
first input array. Should have a real-valued data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array
must have a data type of ``bool``.
Examples
--------
>>> x1 = ivy.array([2., 5., 15.])
>>> x2 = ivy.array([3., 2., 4.])
>>> y = x1.less(x2)
>>> print(y)
ivy.array([ True, False, False])
"""
return ivy.less(self._data, x2, out=out)
def less_equal(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.less_equal. This method
simply wraps the function, and so the docstring for ivy.less_equal also
applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a real-valued data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([1, 2, 3])
>>> x2 = ivy.array([2, 2, 1])
>>> y = x1.less_equal(x2)
>>> print(y)
ivy.array([True, True, False])
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([2.5, 3.3, 9.24])
>>> x2 = ivy.native_array([2.5, 1.1, 9.24])
>>> y = x1.less_equal(x2)
>>> print(y)
ivy.array([True, False, True])
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.array([3., 1., 0.8])
>>> x2 = ivy.Container(a=ivy.array([2., 1., 0.7]), b=ivy.array([3., 0.6, 1.2]))
>>> y = x1.less_equal(x2)
>>> print(y)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, False, True])
}
"""
return ivy.less_equal(self._data, x2, out=out)
def log(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.log. This method simply
wraps the function, and so the docstring for ivy.log also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated result for each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([4.0, 1, -0.0, -5.0])
>>> y = x.log()
>>> print(y)
ivy.array([1.39, 0., -inf, nan])
>>> x = ivy.array([float('nan'), -5.0, -0.0, 1.0, 5.0, float('+inf')])
>>> y = x.log()
>>> print(y)
ivy.array([nan, nan, -inf, 0., 1.61, inf])
>>> x = ivy.array([[float('nan'), 1, 5.0, float('+inf')],
... [+0, -1.0, -5, float('-inf')]])
>>> y = x.log()
>>> print(y)
ivy.array([[nan, 0., 1.61, inf],
[-inf, nan, nan, nan]])
"""
return ivy.log(self._data, out=out)
def log1p(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.log1p. This method simply
wraps the function, and so the docstring for ivy.log1p also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated result for each element in ``self``.
The returned array must have a real-valued floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> y = x.log1p()
>>> print(y)
ivy.array([0.693, 1.1 , 1.39 ])
>>> x = ivy.array([0.1, 0.001])
>>> x.log1p(out=x)
>>> print(x)
ivy.array([0.0953, 0.001 ])
"""
return ivy.log1p(self._data, out=out)
def log2(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.log2. This method simply
wraps the function, and so the docstring for ivy.log2 also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated base ``2`` logarithm for each element
in ``self``. The returned array must have a real-valued floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([5.0, 1, -0.0, -6.0])
>>> y = x.log2()
>>> print(y)
ivy.array([2.32, 0., -inf, nan])
>>> x = ivy.array([float('nan'), -5.0, -0.0, 1.0, 5.0, float('+inf')])
>>> y = x.log2()
>>> print(y)
ivy.array([nan, nan, -inf, 0., 2.32, inf])
>>> x = ivy.array([[float('nan'), 1, 5.0, float('+inf')],
...                [+0, -2.0, -5, float('-inf')]])
>>> y = x.log2()
>>> print(y)
ivy.array([[nan, 0., 2.32, inf],
[-inf, nan, nan, nan]])
"""
return ivy.log2(self._data, out=out)
def log10(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.log10. This method simply
wraps the function, and so the docstring for ivy.log10 also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated base ``10`` logarithm for each element
in ``self``. The returned array must have a real-valued
floating-point data type determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([4.0, 1, -0.0, -5.0])
>>> y = x.log10()
>>> print(y)
ivy.array([0.602, 0., -inf, nan])
>>> x = ivy.array([float('nan'), -5.0, -0.0, 1.0, 5.0, float('+inf')])
>>> y = x.log10()
>>> print(y)
ivy.array([nan, nan, -inf, 0., 0.699, inf])
>>> x = ivy.array([[float('nan'), 1, 5.0, float('+inf')],
... [+0, -1.0, -5, float('-inf')]])
>>> y = x.log10()
>>> print(y)
ivy.array([[nan, 0., 0.699, inf],
[-inf, nan, nan, nan]])
"""
return ivy.log10(self._data, out=out)
def logaddexp(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.logaddexp. This method
simply wraps the function, and so the docstring for ivy.logaddexp also
applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a real-valued data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a real-valued floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([2., 5., 15.])
>>> y = ivy.array([3., 2., 4.])
>>> z = x.logaddexp(y)
>>> print(z)
ivy.array([ 3.31, 5.05, 15. ])
"""
return ivy.logaddexp(self._data, x2, out=out)
def logaddexp2(
self: Union[ivy.Array, float, list, tuple],
x2: Union[ivy.Array, float, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.logaddexp2. This method
simply wraps the function, and so the docstring for ivy.logaddexp2 also
applies to this method with minimal changes.
Parameters
----------
self
First array-like input.
x2
Second array-like input
out
optional output array, for writing the result to.
Returns
-------
ret
Element-wise logaddexp2 of x1 and x2.
Examples
--------
>>> x1 = ivy.array([1, 2, 3])
>>> x2 = ivy.array([4, 5, 6])
>>> x1.logaddexp2(x2)
ivy.array([4.169925, 5.169925, 6.169925])
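Equivalently, this computes ``log2(2**x1 + 2**x2)`` in a numerically stable
way; doubling a power of two adds exactly one to the exponent (a minimal
sketch, outputs assumed):
>>> x1.logaddexp2(x1)
ivy.array([2., 3., 4.])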
"""
return ivy.logaddexp2(self._data, x2, out=out)
def logical_and(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.logical_and. This method
simply wraps the function, and so the docstring for ivy.logical_and
also applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a boolean data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array
must have a data type of ``bool``.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([True, False, True, False])
>>> y = ivy.array([True, True, False, False])
>>> z = x.logical_and(y)
>>> print(z)
ivy.array([True, False, False, False])
"""
return ivy.logical_and(self._data, x2, out=out)
def logical_not(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.logical_not. This method
simply wraps the function, and so the docstring for ivy.logical_not
also applies to this method with minimal changes.
Parameters
----------
self
input array. Should have a boolean data type.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Array` input:
>>> x=ivy.array([0,1,1,0])
>>> x.logical_not()
ivy.array([ True, False, False, True])
>>> x=ivy.array([2,0,3,9])
>>> x.logical_not()
ivy.array([False, True, False, False])
"""
return ivy.logical_not(self._data, out=out)
def logical_or(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.logical_or. This method
simply wraps the function, and so the docstring for ivy.logical_or also
applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a boolean data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array
must have a data type of ``bool``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of
the `docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.logical_or.html>`_
in the standard.
Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also
accepts :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([False, 3, 0])
>>> y = ivy.array([2, True, False])
>>> z = x.logical_or(y)
>>> print(z)
ivy.array([ True, True, False])
"""
return ivy.logical_or(self._data, x2, out=out)
def logical_xor(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.logical_xor. This method
simply wraps the function, and so the docstring for ivy.logical_xor
also applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a boolean data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a boolean data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned array
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.array([True, False, True, False])
>>> y = ivy.array([True, True, False, False])
>>> z = x.logical_xor(y)
>>> print(z)
ivy.array([False, True, True, False])
"""
return ivy.logical_xor(self._data, x2, out=out)
def multiply(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.multiply. This method
simply wraps the function, and so the docstring for ivy.multiply also
applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a real-valued data type.
x2
second input array. Must be compatible with the first input array.
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise products.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([3., 5., 7.])
>>> x2 = ivy.array([4., 6., 8.])
>>> y = x1.multiply(x2)
>>> print(y)
ivy.array([12., 30., 56.])
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([8., 6., 7.])
>>> x2 = ivy.native_array([1., 2., 3.])
>>> y = x1.multiply(x2)
>>> print(y)
ivy.array([ 8., 12., 21.])
"""
return ivy.multiply(self._data, x2, out=out)
def maximum(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
use_where: bool = True,
out: Optional[ivy.Array] = None,
):
"""ivy.Array instance method variant of ivy.maximum. This method simply
wraps the function, and so the docstring for ivy.maximum also applies
to this method with minimal changes.
Parameters
----------
self
Input array containing elements to maximum threshold.
x2
Tensor containing maximum values, must be broadcastable to x1.
use_where
Whether to use :func:`where` to calculate the maximum. If ``False``, the
maximum is calculated using the ``(x + y + |x - y|)/2`` formula. Default is
``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
An array with the elements of x1, but clipped to not be lower than the x2
values.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([7, 9, 5])
>>> y = ivy.array([9, 3, 2])
>>> z = x.maximum(y)
>>> print(z)
ivy.array([9, 9, 5])
>>> x = ivy.array([1, 5, 9, 8, 3, 7])
>>> y = ivy.array([[9], [3], [2]])
>>> z = ivy.zeros((3, 6))
>>> x.maximum(y, out=z)
>>> print(z)
ivy.array([[9.,9.,9.,9.,9.,9.],
[3.,5.,9.,8.,3.,7.],
[2.,5.,9.,8.,3.,7.]])
>>> x = ivy.array([[7, 3]])
>>> y = ivy.array([0, 7])
>>> x.maximum(y, out=x)
>>> print(x)
ivy.array([[7, 7]])
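A minimal sketch of the formula-based path (``use_where=False``), which is
expected to agree with the default for real-valued inputs:
>>> x = ivy.array([7., 9., 5.])
>>> y = ivy.array([9., 3., 2.])
>>> z = x.maximum(y, use_where=False)
>>> print(z)
ivy.array([9., 9., 5.])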
"""
return ivy.maximum(self, x2, use_where=use_where, out=out)
def minimum(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
use_where: bool = True,
out: Optional[ivy.Array] = None,
):
"""ivy.Array instance method variant of ivy.minimum. This method simply
wraps the function, and so the docstring for ivy.minimum also applies
to this method with minimal changes.
Parameters
----------
self
Input array containing elements to minimum threshold.
x2
Tensor containing minimum values, must be broadcastable to x1.
use_where
Whether to use :func:`where` to calculate the minimum. If ``False``, the
minimum is calculated using the ``(x + y - |x - y|)/2`` formula. Default is
``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
An array with the elements of x1, but clipped to not exceed the x2 values.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([7, 9, 5])
>>> y = ivy.array([9, 3, 2])
>>> z = x.minimum(y)
>>> print(z)
ivy.array([7, 3, 2])
>>> x = ivy.array([1, 5, 9, 8, 3, 7])
>>> y = ivy.array([[9], [3], [2]])
>>> z = ivy.zeros((3, 6))
>>> x.minimum(y, out=z)
>>> print(z)
ivy.array([[1.,5.,9.,8.,3.,7.],
[1.,3.,3.,3.,3.,3.],
[1.,2.,2.,2.,2.,2.]])
>>> x = ivy.array([[7, 3]])
>>> y = ivy.array([0, 7])
>>> x.minimum(y, out=x)
>>> print(x)
ivy.array([[0, 3]])
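A minimal sketch of the formula-based path (``use_where=False``), which is
expected to agree with the default for real-valued inputs:
>>> x = ivy.array([7., 9., 5.])
>>> y = ivy.array([9., 3., 2.])
>>> z = x.minimum(y, use_where=False)
>>> print(z)
ivy.array([7., 3., 2.])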
"""
return ivy.minimum(self, x2, use_where=use_where, out=out)
def negative(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.negative. This method
simply wraps the function, and so the docstring for ivy.negative also
applies to this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the evaluated result for each element in ``self``.
The returned array must have the same data type as ``self``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([2, 3 ,5, 7])
>>> y = x.negative()
>>> print(y)
ivy.array([-2, -3, -5, -7])
>>> x = ivy.array([0,-1,-0.5,2,3])
>>> y = ivy.zeros(5)
>>> x.negative(out=y)
>>> print(y)
ivy.array([-0. , 1. , 0.5, -2. , -3. ])
>>> x = ivy.array([[1.1, 2.2, 3.3],
... [-4.4, -5.5, -6.6]])
>>> x.negative(out=x)
>>> print(x)
ivy.array([[-1.1, -2.2, -3.3],
[ 4.4, 5.5, 6.6]])
"""
return ivy.negative(self._data, out=out)
def not_equal(
self: ivy.Array,
x2: Union[float, ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.not_equal. This method
simply wraps the function, and so the docstring for ivy.not_equal also
applies to this method with minimal changes.
Parameters
----------
self
first input array. May have any data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results. The returned
array must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([2., 7., 9.])
>>> x2 = ivy.array([1., 7., 9.])
>>> y = x1.not_equal(x2)
>>> print(y)
ivy.array([True, False, False])
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([2.5, 7.3, 9.375])
>>> x2 = ivy.native_array([2.5, 2.9, 9.375])
>>> y = x1.not_equal(x2)
>>> print(y)
ivy.array([False, True, False])
With mixed :class:`ivy.Array` and `float` inputs:
>>> x1 = ivy.array([2.5, 7.3, 9.375])
>>> x2 = 7.3
>>> y = x1.not_equal(x2)
>>> print(y)
ivy.array([True, False, True])
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.array([3., 1., 0.9])
>>> x2 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> y = x1.not_equal(x2)
>>> print(y)
{
a: ivy.array([True, True, True]),
b: ivy.array([False, False, False])
}
"""
return ivy.not_equal(self._data, x2, out=out)
def positive(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.positive. This method
simply wraps the function, and so the docstring for ivy.positive also
applies to this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the evaluated result for each element in ``self``.
The returned array must have the same data type as ``self``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([2, 3 ,5, 7])
>>> y = x.positive()
>>> print(y)
ivy.array([2, 3, 5, 7])
>>> x = ivy.array([0, -1, -0.5, 2, 3])
>>> y = ivy.zeros(5)
>>> x.positive(out=y)
>>> print(y)
ivy.array([0., -1., -0.5, 2., 3.])
>>> x = ivy.array([[1.1, 2.2, 3.3],
... [-4.4, -5.5, -6.6]])
>>> x.positive(out=x)
>>> print(x)
ivy.array([[ 1.1, 2.2, 3.3],
[-4.4, -5.5, -6.6]])
"""
return ivy.positive(self._data, out=out)
def pow(
self: ivy.Array,
x2: Union[int, float, ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.pow. This method simply
wraps the function, and so the docstring for ivy.pow also applies to
this method with minimal changes.
Parameters
----------
self
first input array whose elements correspond to the exponentiation base.
Should have a real-valued data type.
x2
second input array whose elements correspond to the exponentiation
exponent. Must be compatible with ``self`` (see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> y = x.pow(3)
>>> print(y)
ivy.array([1, 8, 27])
>>> x = ivy.array([1.5, -0.8, 0.3])
>>> y = ivy.zeros(3)
>>> x.pow(2, out=y)
>>> print(y)
ivy.array([2.25, 0.64, 0.09])
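Negative exponents are also supported for floating-point bases (a small
illustrative sketch):
>>> x = ivy.array([2., 4.])
>>> y = x.pow(-1)
>>> print(y)
ivy.array([0.5, 0.25])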
"""
return ivy.pow(self._data, x2, out=out)
def real(self: ivy.Array, /, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.real. This method simply
wraps the function, and so the docstring for ivy.real also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued or complex floating-point data type.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array containing the real component of each element in ``self``.
If an element is real-valued, it is returned unchanged; if it is
complex, its real part is returned.
Examples
--------
>>> x = ivy.array([4+3j, 6+2j, 1-6j])
>>> x.real()
ivy.array([4., 6., 1.])
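Real-valued input is returned unchanged (a minimal sketch):
>>> x = ivy.array([1., 2., 3.])
>>> x.real()
ivy.array([1., 2., 3.])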
"""
return ivy.real(self._data, out=out)
def remainder(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
modulus: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.remainder. This method
simply wraps the function, and so the docstring for ivy.remainder also
applies to this method with minimal changes.
Parameters
----------
self
dividend input array. Should have a real-valued data type.
x2
divisor input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
modulus
whether to compute the modulus instead of the remainder.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
Each element-wise result must have the same sign as the respective
element ``x2_i``. The returned array must have a data type
determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([2., 5., 15.])
>>> x2 = ivy.array([3., 2., 4.])
>>> y = x1.remainder(x2)
>>> print(y)
ivy.array([2., 1., 3.])
With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> x1 = ivy.array([11., 4., 18.])
>>> x2 = ivy.native_array([2., 5., 8.])
>>> y = x1.remainder(x2)
>>> print(y)
ivy.array([1., 4., 2.])
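A sketch of the ``modulus`` flag: with ``modulus=False`` the result is
expected to follow the sign of the dividend instead, like C-style ``fmod``
(illustrative values):
>>> x1 = ivy.array([-3., 3.])
>>> x2 = ivy.array([2., 2.])
>>> y = x1.remainder(x2, modulus=False)
>>> print(y)
ivy.array([-1., 1.])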
"""
return ivy.remainder(self._data, x2, modulus=modulus, out=out)
def round(
self: ivy.Array, *, decimals: int = 0, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.round. This method simply
wraps the function, and so the docstring for ivy.round also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
decimals
number of decimal places to round to. Default is ``0``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the rounded result for each element in ``self``. The
returned array must have the same data type as ``self``.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([6.3, -8.1, 0.5, -4.2, 6.8])
>>> y = x.round()
>>> print(y)
ivy.array([ 6., -8., 0., -4., 7.])
>>> x = ivy.array([-94.2, 256.0, 0.0001, -5.5, 36.6])
>>> y = x.round()
>>> print(y)
ivy.array([-94., 256., 0., -6., 37.])
>>> x = ivy.array([0.23, 3., -1.2])
>>> y = ivy.zeros(3)
>>> x.round(out=y)
>>> print(y)
ivy.array([ 0., 3., -1.])
>>> x = ivy.array([[ -1., -67., 0., 15.5, 1.], [3, -45, 24.7, -678.5, 32.8]])
>>> y = x.round()
>>> print(y)
ivy.array([[-1., -67., 0., 16., 1.],
[3., -45., 25., -678., 33.]])
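The ``decimals`` argument rounds to a given number of decimal places (a
minimal sketch):
>>> x = ivy.array([5.432, 2.678])
>>> y = x.round(decimals=2)
>>> print(y)
ivy.array([5.43, 2.68])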
"""
return ivy.round(self._data, decimals=decimals, out=out)
def sign(
self: ivy.Array,
*,
np_variant: Optional[bool] = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.sign. This method simply
wraps the function, and so the docstring for ivy.sign also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a numeric data type.
np_variant
whether to use the NumPy-style variant when computing the sign.
Default is ``True``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the evaluated result for each element in ``self``. The
returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.array([5.7, -7.1, 0, -0, 6.8])
>>> y = x.sign()
>>> print(y)
ivy.array([ 1., -1., 0., 0., 1.])
>>> x = ivy.array([-94.2, 256.0, 0.0001, -0.0001, 36.6])
>>> y = x.sign()
>>> print(y)
ivy.array([-1., 1., 1., -1., 1.])
>>> x = ivy.array([[ -1., -67., 0., 15.5, 1.], [3, -45, 24.7, -678.5, 32.8]])
>>> y = x.sign()
>>> print(y)
ivy.array([[-1., -1., 0., 1., 1.],
[ 1., -1., 1., -1., 1.]])
"""
return ivy.sign(self._data, np_variant=np_variant, out=out)
def sin(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.sin. This method simply
wraps the function, and so the docstring for ivy.sin also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements are each expressed in radians. Should have a
floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the sine of each element in ``self``. The returned
array must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([0., 1., 2., 3.])
>>> y = x.sin()
>>> print(y)
ivy.array([0., 0.841, 0.909, 0.141])
"""
return ivy.sin(self._data, out=out)
def sinh(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.sinh. This method simply
wraps the function, and so the docstring for ivy.sinh also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements each represent a hyperbolic angle.
Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the hyperbolic sine of each element in ``self``. The
returned array must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1., 2., 3.])
>>> print(x.sinh())
ivy.array([1.18, 3.63, 10.])
>>> x = ivy.array([0.23, 3., -1.2])
>>> y = ivy.zeros(3)
>>> print(x.sinh(out=y))
ivy.array([0.232, 10., -1.51])
"""
return ivy.sinh(self._data, out=out)
def square(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.square. This method simply
wraps the function, and so the docstring for ivy.square also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the square of each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` instance method:
>>> x = ivy.array([1, 2, 3])
>>> y = x.square()
>>> print(y)
ivy.array([1, 4, 9])
>>> x = ivy.array([[1.2, 2, 3.1], [-1, -2.5, -9]])
>>> x.square(out=x)
>>> print(x)
ivy.array([[1.44,4.,9.61],[1.,6.25,81.]])
"""
return ivy.square(self._data, out=out)
def sqrt(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.sqrt. This method simply
wraps the function, and so the docstring for ivy.sqrt also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued floating-point data type.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the square root of each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Array` instance method:
>>> x = ivy.array([[1., 2.], [3., 4.]])
>>> y = x.sqrt()
>>> print(y)
ivy.array([[1. , 1.41],
[1.73, 2. ]])
"""
return ivy.sqrt(self._data, out=out)
def subtract(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
alpha: Optional[Union[int, float]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.subtract. This method
simply wraps the function, and so the docstring for ivy.subtract also
applies to this method with minimal changes.
Parameters
----------
self
first input array. Should have a real-valued data type.
x2
second input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
alpha
optional scalar multiplier for ``x2``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise differences. The returned array
must have a data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([5, 2, 3])
>>> y = ivy.array([1, 2, 6])
>>> z = x.subtract(y)
>>> print(z)
ivy.array([4, 0, -3])
>>> x = ivy.array([5., 5, 3])
>>> y = ivy.array([4, 5, 6])
>>> z = x.subtract(y, alpha=2)
>>> print(z)
ivy.array([-3., -5., -9.])
"""
return ivy.subtract(self._data, x2, alpha=alpha, out=out)
def trapz(
self: ivy.Array,
/,
*,
x: Optional[ivy.Array] = None,
dx: float = 1.0,
axis: int = -1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.trapz. This method simply
wraps the function, and so the docstring for ivy.trapz also applies to
this method with minimal changes.
Parameters
----------
self
The array that should be integrated.
x
The sample points corresponding to the input array values.
If x is None, the sample points are assumed to be evenly spaced
dx apart. The default is None.
dx
The spacing between sample points when x is None. The default is 1.
axis
The axis along which to integrate.
out
optional output array, for writing the result to.
Returns
-------
ret
Definite integral of the input array as approximated along a single
axis by the trapezoidal rule. If the input is 1-dimensional, the
result is a scalar; if it is n-dimensional, the result has n-1
dimensions.
Examples
--------
>>> y = ivy.array([1, 2, 3])
>>> y.trapz()
4.0
>>> y = ivy.array([1, 2, 3])
>>> x = ivy.array([4, 6, 8])
>>> y.trapz(x=x)
8.0
>>> y = ivy.array([1, 2, 3])
>>> y.trapz(dx=2)
8.0
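For reference, the rule computes ``sum((y[i] + y[i+1]) / 2 * spacing)``, so
for ``[1, 2, 3]`` with unit spacing this is ``(1+2)/2 + (2+3)/2 = 4.0``. A
sketch with a 2-D input, integrating each row along ``axis=1``:
>>> y = ivy.array([[0., 1., 2.], [3., 4., 5.]])
>>> y.trapz(axis=1)
ivy.array([2., 8.])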
"""
return ivy.trapz(self._data, x=x, dx=dx, axis=axis, out=out)
def tan(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.tan. This method simply
wraps the function, and so the docstring for ivy.tan also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements are expressed in radians. Should have a
floating-point data type.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the tangent of each element in ``self``.
The return must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([0., 1., 2.])
>>> y = x.tan()
>>> print(y)
ivy.array([0., 1.56, -2.19])
"""
return ivy.tan(self._data, out=out)
def tanh(
self: ivy.Array,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.tanh. This method simply
wraps the function, and so the docstring for ivy.tanh also applies to
this method with minimal changes.
Parameters
----------
self
input array whose elements each represent a hyperbolic angle.
Should have a real-valued floating-point data type.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the hyperbolic tangent of each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([0., 1., 2.])
>>> y = x.tanh()
>>> print(y)
ivy.array([0., 0.762, 0.964])
"""
return ivy.tanh(self._data, complex_mode=complex_mode, out=out)
def trunc(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.trunc. This method simply
wraps the function, and so the docstring for ivy.trunc also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued data type.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the rounded result for each element in ``self``.
The returned array must have the same data type as ``self``
Examples
--------
>>> x = ivy.array([-1, 0.54, 3.67, -0.025])
>>> y = x.trunc()
>>> print(y)
ivy.array([-1., 0., 3., -0.])
"""
return ivy.trunc(self._data, out=out)
def erf(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.erf. This method simply
wraps the function, and so the docstring for ivy.erf also applies to
this method with minimal changes.
Parameters
----------
self
input array to compute the Gauss error function for.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the Gauss error of ``self``.
Examples
--------
>>> x = ivy.array([0, 0.3, 0.7, 1.0])
>>> x.erf()
ivy.array([0., 0.328, 0.677, 0.842])
"""
return ivy.erf(self._data, out=out)
def exp2(
self: Union[ivy.Array, float, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.exp2. This method simply
wraps the function, and so the docstring for ivy.exp2 also applies to
this method with minimal changes.
Parameters
----------
self
Array-like input.
out
optional output array, for writing the result to.
Returns
-------
ret
Element-wise ``2`` raised to the power of ``self``.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> x.exp2()
ivy.array([2., 4., 8.])
>>> x = ivy.array([5., 6., 7.])
>>> x.exp2()
ivy.array([32., 64., 128.])
"""
return ivy.exp2(self._data, out=out)
def gcd(
self: Union[ivy.Array, int, list, tuple],
x2: Union[ivy.Array, int, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.gcd. This method simply
wraps the function, and so the docstring for ivy.gcd also applies to
this method with minimal changes.
Parameters
----------
self
First array-like input.
x2
Second array-like input.
out
optional output array, for writing the result to.
Returns
-------
ret
Element-wise gcd of |x1| and |x2|.
Examples
--------
>>> x1 = ivy.array([1, 2, 3])
>>> x2 = ivy.array([4, 5, 6])
>>> x1.gcd(x2)
ivy.array([1., 1., 3.])
>>> x1 = ivy.array([1, 2, 3])
>>> x1.gcd(10)
ivy.array([1., 2., 1.])
"""
return ivy.gcd(self._data, x2, out=out)
def nan_to_num(
self: ivy.Array,
/,
*,
copy: bool = True,
nan: Union[float, int] = 0.0,
posinf: Optional[Union[float, int]] = None,
neginf: Optional[Union[float, int]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.nan_to_num. This method
simply wraps the function, and so the docstring for ivy.nan_to_num also
applies to this method with minimal changes.
Parameters
----------
self
Array input.
copy
Whether to create a copy of x (True) or to replace values in-place (False).
The in-place operation only occurs if casting to an array does not require
a copy. Default is True.
nan
Value to be used to fill NaN values. If no value is passed then NaN values
will be replaced with 0.0.
posinf
Value to be used to fill positive infinity values. If no value is passed
then positive infinity values will be replaced with a very large number.
neginf
Value to be used to fill negative infinity values.
If no value is passed then negative infinity values
will be replaced with a very small (or negative) number.
out
optional output array, for writing the result to.
Returns
-------
ret
Array with the non-finite values replaced.
If copy is False, this may be x itself.
Examples
--------
>>> x = ivy.array([1., 2., 3., float('nan')])
>>> x.nan_to_num()
ivy.array([1., 2., 3., 0.])
>>> x = ivy.array([1., 2., 3., float('inf')])
>>> x.nan_to_num(posinf=5e+100)
ivy.array([1., 2., 3., 5e+100])
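Negative infinities are filled via ``neginf`` (a minimal sketch):
>>> x = ivy.array([float('-inf'), 2.])
>>> x.nan_to_num(neginf=-999.)
ivy.array([-999., 2.])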
"""
return ivy.nan_to_num(
self._data, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=out
)
def angle(
self: ivy.Array,
/,
*,
deg: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.angle. This method simply
wraps the function, and so the docstring for ivy.angle also applies to
this method with minimal changes.
Parameters
----------
self
Array-like input.
deg
whether to return the angle in degrees instead of radians.
Default is ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
Returns an array of angles for each complex number in the input.
If ``deg`` is ``False`` (the default), the angle is in radians; if
``deg`` is ``True``, it is in degrees.
Examples
--------
>>> z = ivy.array([-1 + 1j, -2 + 2j, 3 - 3j])
>>> z
ivy.array([-1.+1.j, -2.+2.j, 3.-3.j])
>>> z.angle()
ivy.array([ 2.35619449, 2.35619449, -0.78539816])
>>> z.angle(deg=True)
ivy.array([135., 135., -45.])
"""
return ivy.angle(self._data, deg=deg, out=out)
def reciprocal(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.reciprocal.This method
simply wraps the function, and so the docstring for ivy.reciprocal also
applies to this method with minimal changes.
Parameters
----------
self
input array to compute the element-wise reciprocal for.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the element-wise reciprocal of ``self``.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> y = x.reciprocal()
>>> print(y)
ivy.array([1., 0.5, 0.333])
"""
return ivy.reciprocal(self._data, out=out)
def deg2rad(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.deg2rad. This method simply
wraps the function, and so the docstring for ivy.deg2rad also applies
to this method with minimal changes.
Parameters
----------
self
input array, to be converted from degrees to radians.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the element-wise conversion from degrees to radians.
Examples
--------
With :class:`ivy.Array` input:
>>> x=ivy.array([90,180,270,360])
>>> y=x.deg2rad()
>>> print(y)
ivy.array([1.57, 3.14, 4.71, 6.28])
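Round-tripping through :func:`rad2deg` is expected to recover the input, up
to floating-point rounding (illustrative):
>>> x = ivy.array([90., 180., 270., 360.])
>>> y = x.deg2rad().rad2deg()
>>> print(y)
ivy.array([ 90., 180., 270., 360.])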
"""
return ivy.deg2rad(self._data, out=out)
def rad2deg(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.rad2deg. This method simply
wraps the function, and so the docstring for ivy.rad2deg also applies
to this method with minimal changes.
Parameters
----------
self
input array, to be converted from radians to degrees.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the element-wise conversion from radians to degrees.
Examples
--------
With :class:`ivy.Array` input:
>>> x=ivy.array([1., 5., 8., 10.])
>>> y=x.rad2deg()
>>> print(y)
ivy.array([ 57.3, 286. , 458. , 573. ])
"""
return ivy.rad2deg(self._data, out=out)
def trunc_divide(
self: ivy.Array,
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.trunc_divide. This method
simply wraps the function, and so the docstring for ivy.trunc_divide
also applies to this method with minimal changes.
Parameters
----------
self
dividend input array. Should have a real-valued data type.
x2
divisor input array. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the element-wise results.
The returned array must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x1 = ivy.array([2., 7., 9.])
>>> x2 = ivy.array([2., -2., 2.])
>>> y = x1.trunc_divide(x2)
>>> print(y)
ivy.array([ 1., -3., 4.])
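This is expected to match truncating the true quotient toward zero, i.e.
``x1.divide(x2).trunc()`` (illustrative values):
>>> x1 = ivy.array([-7., 7.])
>>> x2 = ivy.array([2., 2.])
>>> y = x1.trunc_divide(x2)
>>> print(y)
ivy.array([-3., 3.])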
"""
return ivy.trunc_divide(self._data, x2, out=out)
def isreal(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array:
"""ivy.Array instance method variant of ivy.isreal. This method simply
wraps the function, and so the docstring for ivy.isreal also applies to
this method with minimal changes.
Parameters
----------
self
input array. Should have a real-valued data type.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing test results. An element ``out_i`` is ``True``
if ``self_i`` is real number and ``False`` otherwise.
The returned array should have a data type of ``bool``.
Examples
--------
>>> x = ivy.array([1j, 2+5j, 3.7-6j])
>>> x.isreal()
ivy.array([False, False, False])
"""
return ivy.isreal(self._data, out=out)
def lcm(
self: ivy.Array, x2: ivy.Array, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.lcm. This method simply
wraps the function, and so the docstring for ivy.lcm also applies to
this method with minimal changes.
Parameters
----------
self
first input array.
x2
second input array
out
optional output array, for writing the result to.
Returns
-------
ret
an array that includes the element-wise least common multiples
of ``self`` and ``x2``.
Examples
--------
>>> x1=ivy.array([2, 3, 4])
>>> x2=ivy.array([5, 8, 15])
>>> x1.lcm(x2)
ivy.array([10, 24, 60])
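A scalar second argument broadcasts, and ``lcm(a, b) * gcd(a, b) == |a * b|``
can serve as a quick sanity check (a minimal sketch, outputs assumed):
>>> x1.lcm(6)
ivy.array([ 6, 6, 12])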
"""
return ivy.lcm(self, x2, out=out)