Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
04869b2
sink inverse to cpp
Manfredss Dec 24, 2025
4547e2d
add out param to gcd and lcm
Manfredss Dec 26, 2025
7fb7e75
resolve conflict, simplify out return logic of gcd
Manfredss Dec 26, 2025
8a20c4c
Apply suggestion from @SigureMo
Manfredss Dec 26, 2025
7d294ec
Update python/paddle/_paddle_docs.py
Manfredss Dec 26, 2025
351f02c
Merge branch 'develop' into ApiEnhance_inverse
Manfredss Dec 30, 2025
e76806d
Merge branch 'develop' into ApiEnhance_inverse
Manfredss Jan 10, 2026
a368c47
fix code style
Manfredss Jan 10, 2026
d0204f5
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Manfredss Jan 14, 2026
64e8528
fix
Manfredss Jan 14, 2026
5fc7380
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Manfredss Jan 16, 2026
4f8d4a6
add test
Manfredss Jan 16, 2026
7aef806
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Manfredss Jan 16, 2026
e31d13c
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Manfredss Jan 20, 2026
3e63c12
fix, increase code coverage
Manfredss Jan 20, 2026
6a638b0
Update math.py
Manfredss Jan 20, 2026
9015edc
Merge branch 'PaddlePaddle:develop' into ApiEnhance_inverse
Manfredss Jan 20, 2026
d40f1c1
fix code style
Manfredss Jan 20, 2026
996358e
remove zh characters
Manfredss Jan 21, 2026
3608a16
resolve the CE error
Manfredss Jan 24, 2026
ac67a04
improve
Manfredss Jan 24, 2026
906548a
fix
Manfredss Jan 24, 2026
62ae4b1
extend monkey patch generator to support generic module paths
Manfredss Jan 29, 2026
2050090
tidy up
Manfredss Jan 29, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions paddle/phi/ops/yaml/python_api_info.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -154,6 +154,11 @@
args_alias:
use_default_mapping : True

- op : inverse
name : [paddle.inverse, paddle.Tensor.inverse]
args_alias:
use_default_mapping : True

- op : log
name: [paddle.log, paddle.Tensor.log]
args_alias:
Expand Down
43 changes: 43 additions & 0 deletions python/paddle/_paddle_docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -3357,3 +3357,46 @@ def allclose(
) -> Tensor
""",
)

add_doc_and_signature(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

可以往这个文件里的中间插入,不然PR容易前后自己冲突。

"inverse",
"""
Takes the inverse of the square matrix. A square matrix is a matrix with
the same number of rows and columns. The input can be a square matrix
(2-D Tensor) or batches of square matrices.

Args:
x (Tensor): The input tensor. The last two
dimensions should be equal. When the number of dimensions is
greater than 2, it is treated as batches of square matrix. The data
type can be float32, float64, complex64, complex128.
Alias: ``input``.
name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
out (Tensor|None, optional): The output Tensor. If provided, the result is stored into this Tensor and it is returned. Default: None.

Returns:
Tensor: A Tensor holding the inverse of x. The shape and data type
are the same as x.

Examples:
.. code-block:: python

>>> import paddle

>>> mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
>>> inv = paddle.inverse(mat)
>>> print(inv)
Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.50000000, 0. ],
[0. , 0.50000000]])

""",
"""
def inverse(
x: Tensor,
name: str | None = None,
*,
out: Tensor | None = None,
) -> Tensor
""",
)
89 changes: 23 additions & 66 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
amax,
amin,
any,
inverse,
isfinite,
isinf,
isnan,
Expand Down Expand Up @@ -2897,63 +2898,6 @@ def outer(
return out


def inverse(
    x: Tensor, name: str | None = None, *, out: Tensor | None = None
) -> Tensor:
    """
    Takes the inverse of the square matrix. A square matrix is a matrix with
    the same number of rows and columns. The input can be a square matrix
    (2-D Tensor) or batches of square matrices.

    Args:
        x (Tensor): The input tensor. The last two
            dimensions should be equal. When the number of dimensions is
            greater than 2, it is treated as batches of square matrix. The data
            type can be float32, float64, complex64, complex128.
        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
        out (Tensor|None, optional): The output Tensor. If provided, the
            result is copied into it and it is returned. Default: None.

    Returns:
        Tensor: A Tensor holds the inverse of x. The shape and data type
            is the same as x.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
            >>> inv = paddle.inverse(mat)
            >>> print(inv)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0.50000000, 0.        ],
             [0.        , 0.50000000]])

    """
    if in_dynamic_or_pir_mode():
        res = _C_ops.inverse(x)
        # Mirror the ``out`` semantics of gcd/lcm in this module: copy the
        # result into the user-provided tensor and return that tensor.
        if out is not None:
            paddle.assign(res, out)
            return out
        return res
    else:

        def _check_input(x):
            # Static graph builds lazily, so validate dtype and rank up
            # front; otherwise the error would only surface at run time.
            check_variable_and_dtype(
                x,
                'x',
                ['float32', 'float64', 'complex64', 'complex128'],
                'inverse',
            )
            if len(x.shape) < 2:
                raise ValueError(
                    "The input of inverse is expected to be a Tensor whose number "
                    f"of dimensions is no less than 2. But received: {len(x.shape)}, "
                    f"x's shape: {x.shape}."
                )

        _check_input(x)
        helper = LayerHelper('inverse', **locals())
        result = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='inverse',
            inputs={'Input': [x]},
            outputs={'Output': [result]},
        )
        if out is not None:
            paddle.assign(result, out)
            return out
        return result


@ForbidKeywordsDecorator(
illegal_keys={"input", "dim", "other"},
func_name="paddle.max",
Expand Down Expand Up @@ -5415,8 +5359,10 @@ def deg2rad(x: Tensor, name: str | None = None) -> Tensor:
return out


@param_two_alias(['x', 'input'], ['y', 'other'])
def gcd(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
@ParamAliasDecorator({"x": ["input"], "y": ["other"]})
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个装饰器性能要比param_two_alias差点,后面还是可以直接用param_two_alias

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个不需要改了

def gcd(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

out参数未覆盖

x: Tensor, y: Tensor, name: str | None = None, *, out: Tensor | None = None
) -> Tensor:
"""
Computes the element-wise greatest common divisor (GCD) of input |x| and |y|.
Both x and y must have integer types.
Expand All @@ -5430,6 +5376,7 @@ def gcd(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
x (Tensor): An N-D Tensor, the data type is int32, int64.
y (Tensor): An N-D Tensor, the data type is int32, int64.
name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
out (Tensor|None, optional): The output tensor. Default: None.

Returns:
out (Tensor): An N-D Tensor, the data type is the same with input.
Expand Down Expand Up @@ -5492,13 +5439,17 @@ def _gcd_body_fn(x, y):
if in_dynamic_mode():
while _gcd_cond_fn(x, y):
x, y = _gcd_body_fn(x, y)

return x
out_res = x
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64'], 'gcd')
check_variable_and_dtype(y, 'y', ['int32', 'int64'], 'gcd')
out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x, y])
out_res, _ = paddle.static.nn.while_loop(
_gcd_cond_fn, _gcd_body_fn, [x, y]
)
if out is not None:
paddle.assign(out_res, out)
return out
return out_res


@param_two_alias(['x', 'input'], ['y', 'other'])
Expand Down Expand Up @@ -5545,8 +5496,10 @@ def _gcd_body_fn(x, y):
return x


@param_two_alias(['x', 'input'], ['y', 'other'])
def lcm(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
@ParamAliasDecorator({"x": ["input"], "y": ["other"]})
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

不需要改了

def lcm(
x: Tensor, y: Tensor, name: str | None = None, *, out: Tensor | None = None
) -> Tensor:
"""
Computes the element-wise least common multiple (LCM) of input |x| and |y|.
Both x and y must have integer types.
Expand All @@ -5560,6 +5513,7 @@ def lcm(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
x (Tensor): An N-D Tensor, the data type is int32, int64.
y (Tensor): An N-D Tensor, the data type is int32, int64.
name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
out (Tensor|None, optional): The output tensor. Default: None.

Returns:
out (Tensor): An N-D Tensor, the data type is the same with input.
Expand Down Expand Up @@ -5600,10 +5554,13 @@ def lcm(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
# they won't be used.
d_equal_0 = paddle.equal(d, 0)
d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d)
out = paddle.where(
out_res = paddle.where(
d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x * y) // d_safe
)
return out
if out is not None:
paddle.assign(out_res, out)
return out
return out_res


@param_two_alias(['x', 'input'], ['y', 'other'])
Expand Down
67 changes: 57 additions & 10 deletions test/legacy_test/test_inverse_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class TestInverseOp(OpTest):
def config(self):
self.matrix_shape = [10, 10]
self.dtype = "float64"
self.python_api = paddle.tensor.math.inverse
self.python_api = paddle.inverse

def setUp(self):
self.op_type = "inverse"
Expand Down Expand Up @@ -55,28 +55,28 @@ class TestInverseOpBatched(TestInverseOp):
def config(self):
self.matrix_shape = [8, 4, 4]
self.dtype = "float64"
self.python_api = paddle.tensor.math.inverse
self.python_api = paddle.inverse


class TestInverseOpZeroSize(TestInverseOp):
    """Degenerate input: a 0x0 matrix must round-trip without error."""

    def config(self):
        self.matrix_shape = [0, 0]
        self.dtype = "float64"
        self.python_api = paddle.inverse


class TestInverseOpBatchedZeroSize(TestInverseOp):
    """Degenerate batched input: a batch of seven 0x0 matrices."""

    def config(self):
        self.matrix_shape = [7, 0, 0]
        self.dtype = "float64"
        self.python_api = paddle.inverse


class TestInverseOpLarge(TestInverseOp):
def config(self):
self.matrix_shape = [32, 32]
self.dtype = "float64"
self.python_api = paddle.tensor.math.inverse
self.python_api = paddle.inverse

def test_grad(self):
self.check_grad(
Expand All @@ -88,7 +88,7 @@ class TestInverseOpFP32(TestInverseOp):
def config(self):
self.matrix_shape = [10, 10]
self.dtype = "float32"
self.python_api = paddle.tensor.math.inverse
self.python_api = paddle.inverse

def test_grad(self):
self.check_grad(
Expand All @@ -100,21 +100,21 @@ class TestInverseOpBatchedFP32(TestInverseOpFP32):
def config(self):
self.matrix_shape = [8, 4, 4]
self.dtype = "float32"
self.python_api = paddle.tensor.math.inverse
self.python_api = paddle.inverse


class TestInverseOpLargeFP32(TestInverseOpFP32):
    """Larger float32 input: a single 32x32 matrix."""

    def config(self):
        self.matrix_shape = [32, 32]
        self.dtype = "float32"
        self.python_api = paddle.inverse


class TestInverseOpComplex64(TestInverseOp):
    """complex64 input: forward value test plus a gradient check."""

    def config(self):
        self.matrix_shape = [10, 10]
        self.dtype = "complex64"
        self.python_api = paddle.inverse

    def test_grad(self):
        self.check_grad(['Input'], 'Output', check_pir=True)
Expand All @@ -124,7 +124,7 @@ class TestInverseOpComplex128(TestInverseOp):
def config(self):
self.matrix_shape = [10, 10]
self.dtype = "complex128"
self.python_api = paddle.tensor.math.inverse
self.python_api = paddle.inverse

def test_grad(self):
self.check_grad(['Input'], 'Output', check_pir=True)
Expand Down Expand Up @@ -253,6 +253,53 @@ def test_dygraph(self):
np.testing.assert_allclose(input.grad.shape, input.shape)


class TestInverseAPICompat(unittest.TestCase):
    """Compatibility tests for paddle.inverse: the ``input`` argument alias
    and the keyword-only ``out`` parameter, in both dygraph and static mode."""

    def setUp(self):
        # Fixed seed so the random matrix (and any failure) is reproducible.
        self.dtype = "float64"
        self.shape = [10, 10]
        np.random.seed(123)
        self.x_np = np.random.random(self.shape).astype(self.dtype)
        # Ensure invertible
        while np.linalg.det(self.x_np) == 0:
            self.x_np = np.random.random(self.shape).astype(self.dtype)
        # Reference result computed with NumPy.
        self.out_np = np.linalg.inv(self.x_np)

    def test_alias(self):
        # ``input`` must be accepted as an alias for ``x`` in dygraph mode.
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out = paddle.inverse(input=x)
        np.testing.assert_allclose(out.numpy(), self.out_np, rtol=1e-5)
        paddle.enable_static()

    def test_out(self):
        # The result must be written into the user-provided ``out`` tensor.
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out = paddle.empty_like(x)
        paddle.inverse(x, out=out)
        np.testing.assert_allclose(out.numpy(), self.out_np, rtol=1e-5)
        paddle.enable_static()

    def test_out_return(self):
        # Both the returned tensor and ``out`` must hold the result.
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out = paddle.empty_like(x)
        res = paddle.inverse(x, out=out)
        np.testing.assert_allclose(res.numpy(), self.out_np, rtol=1e-5)
        np.testing.assert_allclose(out.numpy(), self.out_np, rtol=1e-5)
        paddle.enable_static()

    def test_static_alias(self):
        # The ``input`` alias must also work when building a static graph.
        paddle.enable_static()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, startup_prog):
            x = paddle.static.data(name='x', shape=self.shape, dtype=self.dtype)
            out = paddle.inverse(input=x)
            # NOTE(review): running inside program_guard so the executor's
            # default program is main_prog — confirm against the original
            # indentation, which was lost in this view.
            exe = paddle.static.Executor(paddle.CPUPlace())
            res = exe.run(feed={'x': self.x_np}, fetch_list=[out])
        np.testing.assert_allclose(res[0], self.out_np, rtol=1e-5)


if __name__ == "__main__":
paddle.enable_static()
unittest.main()
Loading