10 changes: 10 additions & 0 deletions paddle/phi/ops/yaml/python_api_info.yaml
@@ -205,6 +205,16 @@
x : [input]
y : [values]

- op : i1
name : [paddle.i1, paddle.Tensor.i1]
args_alias :
use_default_mapping : True

- op : i1e
name : [paddle.i1e, paddle.Tensor.i1e]
args_alias :
use_default_mapping : True

- op : index_put
name : [paddle.index_put, paddle.Tensor.index_put]
args_alias :
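For context, a minimal sketch (not part of the diff) of the call patterns the `i1`/`i1e` registrations above are meant to enable; the torch-style `input=` keyword alias is an assumption inferred from the compatibility tests added later in this PR:

import paddle

x = paddle.to_tensor([0.5, 1.0, 2.0], dtype="float32")
y1 = paddle.i1(x)         # positional arg, paddle style
y2 = paddle.i1(x=x)       # paddle keyword
y3 = paddle.i1(input=x)   # torch-style keyword alias (assumed from use_default_mapping)
y4 = x.i1()               # Tensor method form (paddle.Tensor.i1)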
69 changes: 69 additions & 0 deletions python/paddle/_paddle_docs.py
@@ -4124,3 +4124,72 @@ def conj(
) -> Tensor
""",
)

add_doc_and_signature(
"i1",
"""
The function is used to calculate the modified Bessel function of the first kind, order 1.

Args:
x (Tensor): The input tensor. Its data type should be float32, float64,
uint8, int8, int16, int32 or int64.
name (str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

Returns:
- out (Tensor), the value of the modified Bessel function of order 1 at x
(integer types are autocast to float32).

Examples:
.. code-block:: python

>>> import paddle

>>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
>>> print(paddle.i1(x))
Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
[0. , 0.56515908, 1.59063685, 3.95337057, 9.75946712])
""",
"""
def i1(
x: Tensor,
name: str | None = None,
*,
out: Tensor | None = None
) -> Tensor
""",
)

add_doc_and_signature(
"i1e",
"""
The function is used to calculate the exponentially scaled modified Bessel function of order 1.

Args:
x (Tensor): The input tensor. Its data type should be float32, float64,
uint8, int8, int16, int32 or int64.
name (str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

Returns:
- out (Tensor), the value of the exponentially scaled modified Bessel function of order 1 at x
(integer types are autocast to float32).

Examples:
.. code-block:: python

>>> import paddle

>>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
>>> print(paddle.i1e(x))
Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
[0. , 0.20791042, 0.21526928, 0.19682673, 0.17875087])
""",
"""
def i1e(
x: Tensor,
name: str | None = None,
*,
out: Tensor | None = None
) -> Tensor
""",
)
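For reference, i1 computes the modified Bessel function of the first kind of order 1, I_1(x) = sum_{k>=0} (x/2)^(2k+1) / (k! (k+1)!), and i1e computes the exponentially scaled variant exp(-|x|) * I_1(x). The signatures registered above also declare a keyword-only `out` parameter; below is a minimal sketch of its use, mirroring the compatibility tests added later in this PR (the shape of the pre-allocated tensor is illustrative only):

import paddle

x = paddle.to_tensor([0.0, 1.0, 2.0, 3.0], dtype="float32")
buf = paddle.empty([])      # destination tensor; assumed to be resized and filled in place
paddle.i1(x, out=buf)       # result written into buf
paddle.i1e(x, out=buf)      # same pattern for the scaled variant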
4 changes: 3 additions & 1 deletion python/paddle/special.py
@@ -13,10 +13,12 @@
# limitations under the License.

from .tensor.compat_softmax import softmax
from .tensor.math import logsumexp
from .tensor.math import i1, i1e, logsumexp
from .tensor.ops import expm1

__all__ = [
"i1",
"i1e",
"logsumexp",
"softmax",
"expm1",
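With the additions to `__all__` above, both functions should also be callable through the `paddle.special` namespace; a minimal sketch, assuming the re-exports simply alias the tensor.math implementations:

import paddle

x = paddle.to_tensor([0.5, 1.5, 2.5], dtype="float32")
a = paddle.special.i1(x)    # same op as paddle.i1
b = paddle.special.i1e(x)   # same op as paddle.i1e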
85 changes: 2 additions & 83 deletions python/paddle/tensor/math.py
@@ -34,6 +34,8 @@
fmax,
fmin,
heaviside,
i1,
i1e,
isfinite,
isinf,
isnan,
@@ -6288,89 +6290,6 @@ def i0e(x: Tensor, name: str | None = None) -> Tensor:
return out


def i1(x: Tensor, name: str | None = None) -> Tensor:
"""
The function is used to calculate modified bessel function of order 1.

Args:
x (Tensor): The input tensor, it's data type should be float32, float64,
uint8, int8, int16, int32, int64.
name (str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

Returns:
- out (Tensor), A Tensor. the value of the modified bessel function of order 1 at x
(integer types are autocasted into float32).

Examples:
.. code-block:: python

>>> import paddle

>>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
>>> print(paddle.i1(x))
Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
[0. , 0.56515908, 1.59063685, 3.95337057, 9.75946712])
"""
if in_dynamic_or_pir_mode():
return _C_ops.i1(x)
else:
check_variable_and_dtype(
x,
"x",
["float32", "float64", "uint8", "int8", "int16", "int32", "int64"],
"i1",
)

helper = LayerHelper("i1", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='i1', inputs={'x': x}, outputs={'out': out}, attrs={}
)
return out


def i1e(x: Tensor, name: str | None = None) -> Tensor:
"""
The function is used to calculate exponentially scaled modified Bessel function of order 1.

Args:

x (Tensor): The input tensor, it's data type should be float32, float64,
uint8, int8, int16, int32, int64.
name (str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

Returns:
- out (Tensor), A Tensor. the value of the exponentially scaled modified Bessel function of order 1 at x
(integer types are autocasted into float32).

Examples:
.. code-block:: python

>>> import paddle

>>> x = paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")
>>> print(paddle.i1e(x))
Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
[0. , 0.20791042, 0.21526928, 0.19682673, 0.17875087])
"""
if in_dynamic_or_pir_mode():
return _C_ops.i1e(x)
else:
check_variable_and_dtype(
x,
"x",
["float32", "float64", "uint8", "int8", "int16", "int32", "int64"],
"i1e",
)

helper = LayerHelper("i1e", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='i1e', inputs={'x': x}, outputs={'out': out}, attrs={}
)
return out


def polygamma(x: Tensor, n: int, name: str | None = None) -> Tensor:
r"""
Calculates the polygamma of the given input tensor, element-wise.
70 changes: 70 additions & 0 deletions test/legacy_test/test_i1_op.py
@@ -166,5 +166,75 @@ def test_check_grad(self):
self.check_grad(['x'], 'out')


class TestI1API_Compatibility(unittest.TestCase):
DTYPE = "float64"
DATA = [0, 1, 2, 3, 4, 5]

def setUp(self):
self.x = np.array(self.DATA).astype(self.DTYPE)
self.place = get_places()

def test_dygraph_Compatibility(self):
def run(place):
paddle.disable_static(place)
x = paddle.to_tensor(self.x)
paddle_dygraph_out = []
# Position args (args)
out1 = paddle.i1(x)
paddle_dygraph_out.append(out1)
# Keyword args (kwargs) for paddle
out2 = paddle.i1(x=x)
paddle_dygraph_out.append(out2)
# Torch-style keyword args
out3 = paddle.i1(input=x)
paddle_dygraph_out.append(out3)
# Tensor method call
out4 = x.i1()
paddle_dygraph_out.append(out4)
# Test out
out5 = paddle.empty([])
paddle.i1(x, out=out5)
paddle_dygraph_out.append(out5)
# scipy reference out
ref_out = reference_i1(self.x)
# Check
for out in paddle_dygraph_out:
np.testing.assert_allclose(out.numpy(), ref_out, rtol=1e-5)
paddle.enable_static()

for place in self.place:
run(place)

def test_static_Compatibility(self):
def run(place):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data(
name="x", shape=self.x.shape, dtype=self.DTYPE
)
# Position args (args)
out1 = paddle.i1(x)
# Keyword args (kwargs) for paddle
out2 = paddle.i1(x=x)
# Torch-style keyword args
out3 = paddle.i1(input=x)
# Tensor method call
out4 = x.i1()

exe = paddle.static.Executor(place)
fetches = exe.run(
paddle.static.default_main_program(),
feed={"x": self.x},
fetch_list=[out1, out2, out3, out4],
)
ref_out = reference_i1(self.x)
for out in fetches:
np.testing.assert_allclose(out, ref_out, rtol=1e-5)
paddle.disable_static()

for place in self.place:
run(place)


if __name__ == "__main__":
unittest.main()
70 changes: 70 additions & 0 deletions test/legacy_test/test_i1e_op.py
@@ -166,5 +166,75 @@ def test_check_grad(self):
self.check_grad(['x'], 'out')


class TestI1EAPI_Compatibility(unittest.TestCase):
DTYPE = "float64"
DATA = [0, 1, 2, 3, 4, 5]

def setUp(self):
self.x = np.array(self.DATA).astype(self.DTYPE)
self.place = get_places()

def test_dygraph_Compatibility(self):
def run(place):
paddle.disable_static(place)
x = paddle.to_tensor(self.x)
paddle_dygraph_out = []
# Position args (args)
out1 = paddle.i1e(x)
paddle_dygraph_out.append(out1)
# Keyword args (kwargs) for paddle
out2 = paddle.i1e(x=x)
paddle_dygraph_out.append(out2)
# Torch-style keyword args
out3 = paddle.i1e(input=x)
paddle_dygraph_out.append(out3)
# Tensor method call
out4 = x.i1e()
paddle_dygraph_out.append(out4)
# Test out
out5 = paddle.empty([])
paddle.i1e(x, out=out5)
paddle_dygraph_out.append(out5)
# scipy reference out
ref_out = reference_i1e(self.x)
# Check
for out in paddle_dygraph_out:
np.testing.assert_allclose(out.numpy(), ref_out, rtol=1e-5)
paddle.enable_static()

for place in self.place:
run(place)

def test_static_Compatibility(self):
def run(place):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data(
name="x", shape=self.x.shape, dtype=self.DTYPE
)
# Position args (args)
out1 = paddle.i1e(x)
# Keyword args (kwargs) for paddle
out2 = paddle.i1e(x=x)
# Torch-style keyword args
out3 = paddle.i1e(input=x)
# Tensor method call
out4 = x.i1e()

exe = paddle.static.Executor(place)
fetches = exe.run(
paddle.static.default_main_program(),
feed={"x": self.x},
fetch_list=[out1, out2, out3, out4],
)
ref_out = reference_i1e(self.x)
for out in fetches:
np.testing.assert_allclose(out, ref_out, rtol=1e-5)
paddle.disable_static()

for place in self.place:
run(place)


if __name__ == "__main__":
unittest.main()