diff --git a/paddle/fluid/eager/auto_code_generator/generator/monkey_patch_gen.py b/paddle/fluid/eager/auto_code_generator/generator/monkey_patch_gen.py index f999bdfda09f11..e23b1d1e089d6e 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/monkey_patch_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/monkey_patch_gen.py @@ -23,6 +23,7 @@ IMPORT_TEMPLATE = """ import paddle from paddle import _C_ops +from paddle.tensor import magic_method_func from .. import core """ @@ -56,10 +57,18 @@ def _{name}(*args, **kwargs): return _C_ops.{name}(*args, **kwargs) """ SET_METHOD_TEMPLATE = """ - # set methods for paddle.Tensor in dygraph + # set methods && magical methods for paddle.Tensor in dygraph local_tensor = core.eager.Tensor + + magic_method_dict = {v: k for k, v in magic_method_func} + for method_name, method in methods_map: setattr(local_tensor, method_name, method) + + magic_name = magic_method_dict.get(method_name) + if magic_name: + setattr(local_tensor, magic_name, method) + setattr(paddle.tensor, method_name, method) """ diff --git a/paddle/fluid/pybind/arg_pre_process.cc b/paddle/fluid/pybind/arg_pre_process.cc index 4e2c338e904475..e76dbc80885198 100644 --- a/paddle/fluid/pybind/arg_pre_process.cc +++ b/paddle/fluid/pybind/arg_pre_process.cc @@ -153,6 +153,25 @@ void LogsumexpPreProcess(pir::Value* x, void SumPreProcess(Value* x, Value* axis) { paddle::dialect::SetStopGradient(axis); } + +void BinCountPreProcess(Tensor* x, + paddle::optional* weights, + Scalar* minlength) { + CheckDataType("bincount", + "x", + x->dtype(), + {phi::DataType::INT32, phi::DataType::INT64}); +} + +void BinCountPreProcess(Value* x, + paddle::optional* weights, + Value* minlength) { + CheckDataType("bincount", + "x", + pir::GetValueDtype(*x), + {phi::DataType::INT32, phi::DataType::INT64}); +} + void IsClosePreProcess(Value* x, Value* y, Value* rtol, Value* atol) { /* if in_pir_mode(): diff --git a/paddle/fluid/pybind/arg_pre_process.h b/paddle/fluid/pybind/arg_pre_process.h index d5387319e26c71..6ee453a852bb31 100644 --- a/paddle/fluid/pybind/arg_pre_process.h +++ b/paddle/fluid/pybind/arg_pre_process.h @@ -28,6 +28,7 @@ namespace pybind { using Tensor = paddle::Tensor; using Value = pir::Value; using IntArray = paddle::experimental::IntArray; +using Scalar = paddle::experimental::Scalar; using IntVector = std::vector; void ExpandAsPreProcess(paddle::Tensor* x, @@ -39,6 +40,13 @@ void ExpandAsPreProcess(Value* x, void RollPreProcess(Tensor* x, IntArray* shifts, IntVector* axis); void RollPreProcess(Value* x, Value* shifts, IntVector* axis); +void BinCountPreProcess(Tensor* x, + paddle::optional* weights, + Scalar* minlength); +void BinCountPreProcess(Value* x, + paddle::optional* weights, + Value* minlength); + void LogsumexpPreProcess(Tensor* x, std::vector* axis, bool* reduce_all); void LogsumexpPreProcess(Value* x, std::vector* axis, bool* reduce_all); diff --git a/paddle/phi/ops/yaml/python_api_info.yaml b/paddle/phi/ops/yaml/python_api_info.yaml index 1e752fc1c9eb2d..2ee1ca5823753f 100644 --- a/paddle/phi/ops/yaml/python_api_info.yaml +++ b/paddle/phi/ops/yaml/python_api_info.yaml @@ -1,288 +1,357 @@ -- op : acos - name : [paddle.acos,paddle.Tensor.acos] +- op : abs + name : [paddle.abs, paddle.Tensor.abs] args_alias : use_default_mapping : True -- op : amin - name : [paddle.amin,paddle.Tensor.amin] +- op : acos + name : [paddle.acos, paddle.Tensor.acos] args_alias : use_default_mapping : True - op : acosh - name : [paddle.acosh,paddle.Tensor.acosh] + name : [paddle.acosh, paddle.Tensor.acosh] args_alias : use_default_mapping : True -- op : amax - name : [paddle.amax,paddle.Tensor.amax] +- op : all + name : [paddle.all, paddle.Tensor.all] args_alias : use_default_mapping : True -- op : matmul - name : [paddle.matmul,paddle.Tensor.matmul] +- op : allclose + name : [paddle.allclose, paddle.Tensor.allclose] args_alias : use_default_mapping : True + pre_process : + static_func : AllClosePreProcess(x, y, rtol, atol) -- op : multiply - name : [paddle.multiply,paddle.Tensor.multiply] +- op : amax + name : [paddle.amax, paddle.Tensor.amax] args_alias : use_default_mapping : True -- op : log2 - name : [paddle.log2,paddle.Tensor.log2] +- op : amin + name : [paddle.amin, paddle.Tensor.amin] args_alias : use_default_mapping : True -- op : log10 - name : [paddle.log10,paddle.Tensor.log10] +- op : angle + name : [paddle.angle, paddle.Tensor.angle] args_alias : use_default_mapping : True -- op : log1p - name : [paddle.log1p,paddle.Tensor.log1p] +- op : any + name : [paddle.any, paddle.Tensor.any] args_alias : use_default_mapping : True -- op : maximum - name : [paddle.maximum,paddle.Tensor.maximum] +- op : argmax + name : [paddle.argmax, paddle.Tensor.argmax] + args_mapper : + func : ArgMaxMinMapper + +- op : argmin + name : [paddle.argmin, paddle.Tensor.argmin] + args_mapper : + func : ArgMaxMinMapper + +- op : asin + name : [paddle.asin, paddle.Tensor.asin] args_alias : use_default_mapping : True -- op : minimum - name : [paddle.minimum,paddle.Tensor.minimum] +- op : asinh + name : [paddle.asinh, paddle.Tensor.asinh] args_alias : use_default_mapping : True -- op : greater_than - name : [paddle.greater_than, paddle.Tensor.greater_than] +- op : atan + name : [paddle.atan, paddle.Tensor.atan] args_alias : use_default_mapping : True -- op : expand_as - name : [paddle.expand_as,paddle.Tensor.expand_as] +- op : atanh + name : [paddle.atanh, paddle.Tensor.atanh] args_alias : use_default_mapping : True - pre_process : - func : ExpandAsPreProcess(x,y,target_shape) -- op : logical_and - name : [paddle.logical_and, paddle.Tensor.logical_and] - args_alias: - use_default_mapping : True +- op : baddbmm + name : [paddle.baddbmm, paddle.Tensor.baddbmm] + args_alias : + x : [batch1] + y : [batch2] -- op : logical_or - name : [paddle.logical_or, paddle.Tensor.logical_or] - args_alias: - use_default_mapping : True +- op : bmm + name : [paddle.bmm, paddle.Tensor.bmm] + args_alias : + x : [input] + y : [mat2] -- op : logical_xor - name : [paddle.logical_xor, paddle.Tensor.logical_xor] - args_alias: - use_default_mapping : True +- op : bincount + name : [paddle.bincount, paddle.Tensor.bincount] + args_alias : + use_default_mapping : True + pre_process : + func : BinCountPreProcess(x, weights, minlength) -- op : logical_not - name : [paddle.logical_not, paddle.Tensor.logical_not] - args_alias: - use_default_mapping : True +- op : bitwise_and + name : [paddle.bitwise_and, paddle.Tensor.bitwise_and] + args_alias : + use_default_mapping : True -- op : argmax - name : [paddle.argmax, paddle.Tensor.argmax] - args_mapper : - func : ArgMaxMinMapper +- op : bitwise_not + name : [paddle.bitwise_not, paddle.Tensor.bitwise_not] + args_alias : + use_default_mapping : True -- op : argmin - name : [paddle.argmin, paddle.Tensor.argmin] - args_mapper : - func : ArgMaxMinMapper +- op : bitwise_xor + name : [paddle.bitwise_xor, paddle.Tensor.bitwise_xor] + args_alias : + use_default_mapping : True - op : ceil name : [paddle.ceil, paddle.Tensor.ceil] - args_alias: + args_alias : + use_default_mapping : True + +- op : conj + name: [paddle.conj, paddle.Tensor.conj] + args_alias : use_default_mapping : True +- op : cos + name : [paddle.cos, paddle.Tensor.cos] + args_alias : + use_default_mapping : True + +- op : cosh + name : [paddle.cosh, paddle.Tensor.cosh] + args_alias : + use_default_mapping : True + +- op : diag + name : [paddle.diag, paddle.Tensor.diag] + args_alias : + x : [input] + offset : [diagonal] + +- op : diagonal + name : [paddle.diagonal, paddle.Tensor.diagonal] + args_alias : + x : [input] + axis1 : [dim1] + axis2 : [dim2] + - op : dot name : [paddle.dot, paddle.Tensor.dot] - args_alias: + args_alias : x : [input] y : [tensor] -- op : all - name : [paddle.all,paddle.Tensor.all] - args_alias: +- op : exp + name : [paddle.exp, paddle.Tensor.exp] + args_alias : use_default_mapping : True -- op : allclose - name : [paddle.allclose, paddle.Tensor.allclose] +- op : expm1 + name : [paddle.expm1, paddle.Tensor.expm1] + args_alias : + use_default_mapping : True + +- op : expand_as + name : [paddle.expand_as, paddle.Tensor.expand_as] + args_alias : + use_default_mapping : True + pre_process : + func : ExpandAsPreProcess(x, y, target_shape) + +- op : floor + name : [paddle.floor, paddle.Tensor.floor] + args_alias : + use_default_mapping : True + +- op : fmax + name : [paddle.fmax, paddle.Tensor.fmax] + args_alias : + use_default_mapping : True + +- op : fmin + name : [paddle.fmin, paddle.Tensor.fmin] + args_alias : + use_default_mapping : True + +- op : gelu + name : [paddle.nn.functional.gelu] + args_alias : + use_default_mapping : True + args_mapper : + func : GeluMapper + +- op : greater_than + name : [paddle.greater_than, paddle.Tensor.greater_than] + args_alias : + use_default_mapping : True + +- op : grid_sample + name : [paddle.nn.functional.grid_sample] args_alias: - x: [input] - y: [other] + use_default_mapping : True pre_process: - static_func: AllClosePreProcess(x, y, rtol,atol) + func : GridSamplePreProcess(x, grid, mode, padding_mode, align_corners) -- op : bmm - name : [paddle.bmm, paddle.Tensor.bmm] - args_alias: +- op : heaviside + name : [paddle.heaviside, paddle.Tensor.heaviside] + args_alias : x : [input] - y : [mat2] -- op : cos - name: [paddle.cos, paddle.Tensor.cos] - args_alias: + y : [values] + +- op : index_put + name : [paddle.index_put, paddle.Tensor.index_put] + args_alias : + value : [values] use_default_mapping : True -- op : cosh - name: [paddle.cosh, paddle.Tensor.cosh] - args_alias: +- op : index_put_ + name : [paddle.index_put_, paddle.Tensor.index_put_] + args_alias : + value : [values] use_default_mapping : True -- op : floor - name: [paddle.floor, paddle.Tensor.floor] - args_alias: +- op : isclose + name : [paddle.isclose, paddle.Tensor.isclose] + args_alias : use_default_mapping : True + pre_process : + static_func : IsClosePreProcess(x, y, rtol, atol) - op : isfinite name : [paddle.isfinite, paddle.Tensor.isfinite] - args_alias: + args_alias : use_default_mapping : True - op : isinf name : [paddle.isinf, paddle.Tensor.isinf] - args_alias: + args_alias : use_default_mapping : True - op : isnan name : [paddle.isnan, paddle.Tensor.isnan] - args_alias: + args_alias : use_default_mapping : True - op : log - name: [paddle.log, paddle.Tensor.log] - args_alias: + name : [paddle.log, paddle.Tensor.log] + args_alias : use_default_mapping : True -- op : logsumexp - name : [paddle.logsumexp,paddle.Tensor.logsumexp] - args_alias: +- op : log1p + name : [paddle.log1p, paddle.Tensor.log1p] + args_alias : use_default_mapping : True - pre_process: - func: LogsumexpPreProcess(x, axis, reduce_all) -- op : roll - name : [paddle.roll, paddle.Tensor.roll] - args_alias: - axis : [dims] +- op : log10 + name : [paddle.log10, paddle.Tensor.log10] + args_alias : use_default_mapping : True - pre_process: - func : RollPreProcess(x, shifts, axis) -- op : rsqrt - name: [paddle.rsqrt, paddle.Tensor.rsqrt] - args_alias: +- op : log2 + name : [paddle.log2, paddle.Tensor.log2] + args_alias : use_default_mapping : True -- op : sigmoid - name : [paddle.sigmoid,paddle.Tensor.sigmoid,paddle.nn.functional.sigmoid] - args_alias: +- op : logsumexp + name : [paddle.logsumexp, paddle.Tensor.logsumexp] + args_alias : use_default_mapping : True + pre_process : + func : LogsumexpPreProcess(x, axis, reduce_all) -- op : sign - name: [paddle.sign, paddle.Tensor.sign] - args_alias: +- op : logical_and + name : [paddle.logical_and, paddle.Tensor.logical_and] + args_alias : use_default_mapping : True -- op : sin - name: [paddle.sin, paddle.Tensor.sin] - args_alias: +- op : logical_not + name : [paddle.logical_not, paddle.Tensor.logical_not] + args_alias : use_default_mapping : True -- op : asin - name : [paddle.asin, paddle.Tensor.asin] - args_alias: +- op : logical_or + name : [paddle.logical_or, paddle.Tensor.logical_or] + args_alias : use_default_mapping : True -- op : any - name : [paddle.any, paddle.Tensor.any] - args_alias: +- op : logical_xor + name : [paddle.logical_xor, paddle.Tensor.logical_xor] + args_alias : use_default_mapping : True -- op : sqrt - name : [paddle.sqrt,paddle.Tensor.sqrt] - args_alias: - x : [input] - -- op : tril - name : [paddle.tril, paddle.Tensor.tril] - args_alias: - x : [input] - -- op : triu - name : [paddle.triu, paddle.Tensor.triu] - args_alias: - x : [input] - -- op : gelu - name : [paddle.nn.functional.gelu] - args_alias: - x : [input] - args_mapper : - func : GeluMapper +- op : matmul + name : [paddle.matmul, paddle.Tensor.matmul] + args_alias : + use_default_mapping : True -- op : sum - name : [paddle.sum, paddle.Tensor.sum] - args_alias: +- op : maximum + name : [paddle.maximum, paddle.Tensor.maximum] + args_alias : use_default_mapping : True - pre_process: - static_func : SumPreProcess(x, axis) - args_mapper : - func : ArgSumMapper -- op : tanh - name : [paddle.tanh, paddle.Tensor.tanh, paddle.nn.functional.tanh] - args_alias: +- op : minimum + name : [paddle.minimum, paddle.Tensor.minimum] + args_alias : use_default_mapping : True -- op : exp - name : [paddle.exp, paddle.Tensor.exp] - args_alias: +- op : multiply + name : [paddle.multiply, paddle.Tensor.multiply] + args_alias : use_default_mapping : True -- op : expm1 - name : [paddle.expm1, paddle.Tensor.expm1] - args_alias: +- op : nextafter + name : [paddle.nextafter, paddle.Tensor.nextafter] + args_alias : use_default_mapping : True -- op : diag - name : [paddle.diag, paddle.Tensor.diag] - args_alias: - x : [input] - offset: [diagonal] +- op : reciprocal + name : [paddle.reciprocal, paddle.Tensor.reciprocal] + args_alias : + use_default_mapping : True -- op : diagonal - name : [paddle.diagonal, paddle.Tensor.diagonal] +- op : roll + name : [paddle.roll, paddle.Tensor.roll] args_alias: - x : [input] - axis1 : [dim1] - axis2 : [dim2] + axis : [dims] + use_default_mapping : True + pre_process: + func : RollPreProcess(x, shifts, axis) - op : round name : [paddle.round, paddle.Tensor.round] - args_alias: + args_alias : use_default_mapping : True -- op : abs - name : [paddle.abs, paddle.Tensor.abs] - args_alias: +- op : rsqrt + name : [paddle.rsqrt, paddle.Tensor.rsqrt] + args_alias : use_default_mapping : True -- op : index_put - name : [paddle.index_put,paddle.Tensor.index_put] +- op : sigmoid + name : [paddle.sigmoid, paddle.Tensor.sigmoid, paddle.nn.functional.sigmoid] args_alias : - value : [values] use_default_mapping : True -- op : index_put_ - name : [paddle.index_put_,paddle.Tensor.index_put_] +- op : sign + name : [paddle.sign, paddle.Tensor.sign] + args_alias : + use_default_mapping : True + +- op : sin + name : [paddle.sin, paddle.Tensor.sin] + args_alias : + use_default_mapping : True + +- op : sinh + name : [paddle.sinh, paddle.Tensor.sinh] args_alias : - value : [values] use_default_mapping : True - op : softplus @@ -290,37 +359,41 @@ args_alias : use_default_mapping : True -- op : isclose - name : [paddle.isclose, paddle.Tensor.isclose] +- op : sqrt + name : [paddle.sqrt, paddle.Tensor.sqrt] args_alias : use_default_mapping : True - pre_process: - static_func: IsClosePreProcess(x, y, rtol,atol) -- op : grid_sample - name : [paddle.nn.functional.grid_sample] - args_alias: - x : [input] - pre_process: - func : GridSamplePreProcess(x, grid, mode, padding_mode, align_corners) +- op : square + name : [paddle.square, paddle.Tensor.square] + args_alias : + use_default_mapping : True -- op : atanh - name: [paddle.atanh, paddle.Tensor.atanh] +- op : sum + name : [paddle.sum, paddle.Tensor.sum] args_alias : use_default_mapping : True + pre_process : + static_func : SumPreProcess(x, axis) + args_mapper : + func : ArgSumMapper -- op : sinh - name: [paddle.sinh, paddle.Tensor.sinh] +- op : tan + name : [paddle.tan, paddle.Tensor.tan] args_alias : use_default_mapping : True -- op : conj - name: [paddle.conj, paddle.Tensor.conj] +- op : tanh + name : [paddle.tanh, paddle.Tensor.tanh, paddle.nn.functional.tanh] args_alias : use_default_mapping : True -- op : baddbmm - name : [paddle.baddbmm, paddle.Tensor.baddbmm] - args_alias: - x : [batch1] - y : [batch2] +- op : tril + name : [paddle.tril, paddle.Tensor.tril] + args_alias : + use_default_mapping : True + +- op : triu + name : [paddle.triu, paddle.Tensor.triu] + args_alias : + use_default_mapping : True diff --git a/python/paddle/_paddle_docs.py b/python/paddle/_paddle_docs.py index 584aaddacc2605..4d41f2911e355b 100644 --- a/python/paddle/_paddle_docs.py +++ b/python/paddle/_paddle_docs.py @@ -99,7 +99,7 @@ def acos( name: str | None = None, *, out: Tensor | None = None, - ) -> Tensor +) -> Tensor """, ) @@ -137,7 +137,7 @@ def acosh( name: str | None = None, *, out: Tensor | None = None, - ) -> Tensor +) -> Tensor """, ) @@ -175,7 +175,7 @@ def sinh( name: str | None = None, *, out: Tensor | None = None, - ) -> Tensor +) -> Tensor """, ) @@ -322,7 +322,7 @@ def amin( add_doc_and_signature( "amax", - """ + r""" Computes the maximum of tensor elements over the given axis. Note: @@ -463,7 +463,7 @@ def amax( add_doc_and_signature( "all", - """ + r""" Computes the ``logical and`` of tensor elements over the given dimension. Args: @@ -535,9 +535,10 @@ def all( ) -> Tensor """, ) + add_doc_and_signature( "argmax", - """ + r""" Computes the indices of the max elements of the input tensor's element along the provided axis. @@ -576,20 +577,20 @@ def all( >>> out4 = paddle.argmax(x, axis=0, keepdim=True) >>> print(out4.numpy()) [[2 2 0 1]] - """, +""", """ - def argmax( +def argmax( x: Tensor, axis: int | None = None, keepdim: bool = False, dtype: DTypeLike = "int64", name: str | None = None, ) -> Tensor - """, +""", ) add_doc_and_signature( "argmin", - """ + r""" Computes the indices of the min elements of the input tensor's element along the provided axis. @@ -628,16 +629,55 @@ def argmax( >>> out4 = paddle.argmin(x, axis=0, keepdim=True) >>> print(out4.numpy()) [[1 1 1 2]] - """, +""", """ - def argmin( +def argmin( x: Tensor, axis: int | None = None, keepdim: bool = False, dtype: DTypeLike = "int64", name: str | None = None, ) -> Tensor - """, +""", +) + +add_doc_and_signature( + "atan", + r""" + Arctangent Operator. + + .. math:: + out = tan^{-1}(x) + + Args: + x (Tensor): Input of Atan operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, + uint8, int8, int16, int32, int64, complex64 or complex128. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. + + Returns: + Tensor. Same shape and dtype as input x + (integer types are autocasted into float32). + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + >>> out = paddle.atan(x) + >>> print(out) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [-0.38050640, -0.19739556, 0.09966865, 0.29145682]) +""", + """ +def atan( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) add_doc_and_signature( @@ -674,7 +714,7 @@ def atanh( name: str | None = None, *, out: Tensor | None = None, - ) -> Tensor +) -> Tensor """, ) @@ -724,8 +764,15 @@ def atanh( >>> res Tensor(shape=[1], dtype=float64, place=Place(cpu), stop_gradient=True, [1.]) - """, - "def log2(x: Tensor, name: str | None = None, * , out: Tensor | None = None) -> Tensor", +""", + """ +def log2( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) add_doc_and_signature( "log10", @@ -773,9 +820,172 @@ def atanh( >>> res Tensor(shape=[1], dtype=float64, place=Place(cpu), stop_gradient=True, [1.]) - """, - "def log10(x: Tensor, name: str | None = None, * , out: Tensor | None = None) -> Tensor", +""", + """ +def log10( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) +add_doc_and_signature( + "asinh", + r""" + Asinh Activation Operator. + + .. math:: + out = asinh(x) + + Args: + x (Tensor): Input of Asinh operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, + uint8, int8, int16, int32, int64, complex64 or complex128. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. + + Returns: + Tensor. Output of Asinh operator, a Tensor with shape same as input + (integer types are autocasted into float32). + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + >>> out = paddle.asinh(x) + >>> print(out) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [-0.39003533, -0.19869010, 0.09983408, 0.29567307]) +""", + """ +def asinh( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "reciprocal", + r""" + Reciprocal Activation Operator. + + .. math:: + out = \\frac{1}{x} + + Args: + x (Tensor): Input of Reciprocal operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, + uint8, int8, int16, int32, int64. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. + + Returns: + Tensor. Output of Reciprocal operator, a Tensor with shape same as input + (integer types are autocasted into float32). + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + >>> out = paddle.reciprocal(x) + >>> print(out) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [-2.50000000, -5. , 10. , 3.33333325]) +""", + """ +def reciprocal( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "square", + r""" + Square each elements of the inputs. + + .. math:: + out = x^2 + + Args: + x (Tensor): Input of Square operator, an N-D Tensor, with data type int32, int64, float32, float64, float16, complex64 or complex128. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. + + Returns: + Tensor. Output of Square operator, a Tensor with shape same as input. + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + >>> out = paddle.square(x) + >>> print(out) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [0.16000001, 0.04000000, 0.01000000, 0.09000000]) +""", + """ +def square( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "tan", + r""" + Tangent Operator. Computes tangent of x element-wise. + + Input range is `(k*pi-pi/2, k*pi+pi/2)` and output range is `(-inf, inf)`. + + .. math:: + out = tan(x) + + Args: + x (Tensor): Input of Tan operator, an N-D Tensor, with data type float32, float64, float16, + bfloat16, uint8, int8, int16, int32, int64, complex64 or complex128. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. + + Returns: + Tensor. Output of Tan operator, a Tensor with shape same as input + (integer types are autocasted into float32). + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + >>> out = paddle.tan(x) + >>> print(out) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [-0.42279324, -0.20271003, 0.10033467, 0.30933627]) +""", + """ +def tan( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) + add_doc_and_signature( "log1p", r""" @@ -805,12 +1015,20 @@ def atanh( Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True, [[0. ], [0.69314718]]) - """, - "def log1p(x: Tensor, name: str | None = None, * , out: Tensor | None = None) -> Tensor", +""", + """ +def log1p( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) + add_doc_and_signature( "matmul", - """ + r""" Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. @@ -901,8 +1119,9 @@ def atanh( >>> z = paddle.matmul(x, y) >>> print(z.shape) paddle.Size([10, 3, 5, 5]) - """, - """ def matmul( +""", + """ +def matmul( x: Tensor, y: Tensor, transpose_x: bool = False, @@ -910,11 +1129,12 @@ def atanh( name: str | None = None, *, out: Tensor | None = None, -) -> Tensor""", +) -> Tensor +""", ) add_doc_and_signature( "multiply", - """ + r""" multiply two tensors element-wise. The equation is: .. math:: @@ -957,12 +1177,16 @@ def atanh( [[[2, 4, 6], [2, 4, 6]]]) - """, - """def multiply(x: Tensor, - y: Tensor, - name: str | None = None, - *, - out: Tensor | None = None) -> Tensor""", +""", + """ +def multiply( + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None +) -> Tensor +""", ) add_doc_and_signature( "logsumexp", @@ -1014,7 +1238,7 @@ def atanh( Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True, [2.15317822, 3.15684605]) - """, +""", """ def logsumexp( x: Tensor, @@ -1024,11 +1248,11 @@ def logsumexp( *, out: Tensor | None = None, ) -> Tensor - """, +""", ) add_doc_and_signature( "softplus", - """ + r""" softplus activation .. math:: @@ -1057,16 +1281,19 @@ def logsumexp( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [0.51301527, 0.59813893, 0.74439669, 0.85435522]) - """, +""", """ - def softplus( - x: Tensor, beta: float = 1, threshold: float = 20, name: str | None = None +def softplus( + x: Tensor, + beta: float = 1, + threshold: float = 20, + name: str | None = None, ) -> Tensor """, ) add_doc_and_signature( "isclose", - """ + r""" Check if all :math:`x` and :math:`y` satisfy the condition: .. math:: @@ -1140,7 +1367,7 @@ def softplus( >>> print(result2) Tensor(shape=[2], dtype=bool, place=Place(cpu), stop_gradient=True, [True, True]) - """, +""", """ def isclose( x: Tensor, @@ -1157,7 +1384,7 @@ def isclose( # zhengsheng add_doc_and_signature( "isfinite", - """ + r""" Return whether every element of input tensor is finite number or not. .. note:: @@ -1181,7 +1408,7 @@ def isclose( >>> out Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True, [False, True , True , False, True , False, False]) - """, +""", """ def isfinite( x: Tensor, @@ -1192,7 +1419,7 @@ def isfinite( add_doc_and_signature( "isinf", - """ + r""" Return whether every element of input tensor is `+/-INF` or not. .. note:: @@ -1216,7 +1443,7 @@ def isfinite( >>> out Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True, [True , False, False, True , False, False, False]) - """, +""", """ def isinf( x: Tensor, @@ -1227,7 +1454,7 @@ def isinf( add_doc_and_signature( "isnan", - """ + r""" Return whether every element of input tensor is `NaN` or not. .. note:: @@ -1252,7 +1479,7 @@ def isinf( >>> out Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True, [False, False, False, False, False, True , True ]) - """, +""", """ def isnan( x: Tensor, @@ -1263,7 +1490,7 @@ def isnan( add_doc_and_signature( "roll", - """ + r""" Roll the `x` tensor along the given axis(axes). With specific 'shifts', Elements that roll beyond the last position are re-introduced at the first according to 'shifts'. If a axis is not specified, @@ -1314,7 +1541,7 @@ def isnan( [[3. 1. 2.] [6. 4. 5.] [9. 7. 8.]] - """, +""", """ def roll( x: Tensor, @@ -1327,7 +1554,7 @@ def roll( add_doc_and_signature( "ceil", - """ + r""" Ceil Operator. Computes ceil of x element-wise. .. math:: @@ -1354,7 +1581,7 @@ def roll( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [-0., -0., 1. , 1. ]) - """, +""", """ def ceil( x: Tensor, @@ -1367,7 +1594,7 @@ def ceil( add_doc_and_signature( "sum", - """ + r""" Computes the sum of tensor elements over the given dimension. .. note:: @@ -1462,7 +1689,7 @@ def ceil( >>> out9 Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True, [4, 0]) - """, +""", """ def sum( x: Tensor, @@ -1478,7 +1705,7 @@ def sum( add_doc_and_signature( "index_put", - """ + r""" Puts values from the tensor values into the tensor x using the indices specified in indices (which is a tuple of Tensors). The expression paddle.index_put_(x, indices, values) is equivalent to tensor[indices] = values. Returns x. If accumulate is True, the elements in values are added to x. If accumulate is False, the behavior is undefined if indices contain duplicate elements. @@ -1516,7 +1743,7 @@ def sum( [[0., 1., 0.], [0., 0., 1.], [0., 1., 0.]]) - """, +""", """ def index_put( x: Tensor, @@ -1530,10 +1757,10 @@ def index_put( add_doc_and_signature( "index_put_", - """ + r""" Inplace version of ``index_put`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_paddle_index_put`. - """, +""", """ def index_put_( x: Tensor, @@ -1548,7 +1775,7 @@ def index_put_( # liuyi add_doc_and_signature( "any", - """ + r""" Computes the ``logical or`` of tensor elements over the given dimension, and return the result. .. note:: @@ -1615,24 +1842,24 @@ def index_put_( [[True], [True]]) - """, - """ - def any( - x: Tensor, - axis: int | Sequence[int] | None = None, - keepdim: bool = False, - name: str | None = None, - *, - out: Tensor | None = None - ) -> Tensor - """, -) -add_doc_and_signature( - "expand_as", +""", """ +def any( + x: Tensor, + axis: int | Sequence[int] | None = None, + keepdim: bool = False, + name: str | None = None, + *, + out: Tensor | None = None +) -> Tensor +""", +) +add_doc_and_signature( + "expand_as", + r""" + + Expand the input tensor ``x`` to the same shape as the input tensor ``y``. - Expand the input tensor ``x`` to the same shape as the input tensor ``y``. - Both the number of dimensions of ``x`` and ``y`` must be less than or equal to 6, and the number of dimensions of ``y`` must be greater than or equal to that of ``x``. The dimension to expand must have a value of 0. The following diagram illustrates how a one-dimensional tensor is transformed into a tensor with a shape of [2,3] through the expand_as operation. The target tensor has a shape of [2,3], and through expand_as, the one-dimensional tensor is expanded into a tensor with a shape of [2,3]. @@ -1662,10 +1889,14 @@ def any( Tensor(shape=[2, 3], dtype=int32, place=Place(cpu), stop_gradient=True, [[1, 2, 3], [1, 2, 3]]) - """, +""", """ - def expand_as(x: Tensor, y: Tensor, name: str | None = None) -> Tensor - """, +def expand_as( + x: Tensor, + y: Tensor, + name: str | None = None, +) -> Tensor +""", ) # shenwei @@ -1690,7 +1921,6 @@ def expand_as(x: Tensor, y: Tensor, name: str | None = None) -> Tensor tensor shape will be [N, C, D, H, W]. - Step 1: Get (x, y) grid coordinates and scale to [0, H-1/W-1]. @@ -1801,39 +2031,39 @@ def expand_as(x: Tensor, y: Tensor, name: str | None = None) -> Tensor [[[[ 0.34000000, 0.01600000, 0.08600000, -0.44800000], [ 0.55000000, -0.07600000, 0.35000000, 0.59000000], [ 0.59600000, 0.38000000, 0.52000000, 0.24000000]]]]) - """, +""", """ - def grid_sample( - x: Tensor, - grid: Tensor, - mode: str = 'bilinear', - padding_mode: Literal["zeros", "reflection", "border"] = 'zeros', - align_corners: bool = True, - name: str | None = None, - ) -> Tensor - """, +def grid_sample( + x: Tensor, + grid: Tensor, + mode: str = 'bilinear', + padding_mode: Literal["zeros", "reflection", "border"] = 'zeros', + align_corners: bool = True, + name: str | None = None, +) -> Tensor +""", ) add_doc_and_signature( "gelu", - """ + r""" gelu activation. The activation function of Gelu is calculated element by element. More information refers to :ref: `Gaussian Error Linear Units`. - approximate parameter must be True, False, "tanh", "none". + The approximate parameter must be True, False, "tanh", or "none". - if approximate is True or "tanh" + If approximate is True or "tanh": .. math:: - gelu(x) = 0.5 * x * (1 + tanh(\\sqrt{\frac{2}{\\pi}} * (x + 0.044715x^{3}))) + gelu(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3}))) - else + else: .. math:: - gelu(x) = 0.5 * x * (1 + erf(\frac{x}{\\sqrt{2}})) + gelu(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}})) .. note:: Alias Support: The parameter name ``input`` can be used as an alias for ``x``. @@ -1875,14 +2105,14 @@ def grid_sample( Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True, [[-0.15880796, 0.34571400], [ 0.84119201, 1.39957154]]) - """, +""", """ - def gelu( - x: Tensor, - approximate: Literal["tanh", "none"] | bool = False, - name: str | None = None, - ) -> Tensor - """, +def gelu( + x: Tensor, + approximate: Literal["tanh", "none"] | bool = False, + name: str | None = None, +) -> Tensor +""", ) add_doc_and_signature( @@ -1919,21 +2149,21 @@ def gelu( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [0.40131235, 0.45016602, 0.52497917, 0.57444251]) - """, +""", """ - def sigmoid( - x: paddle.Tensor, - name: str | None = None, - *, - out: Tensor | None = None, - ) -> paddle.Tensor - """, +def sigmoid( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) # zhouxin add_doc_and_signature( "greater_than", - """ + r""" Returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`. Note: @@ -1961,17 +2191,21 @@ def sigmoid( >>> print(result1) Tensor(shape=[3], dtype=bool, place=Place(cpu), stop_gradient=True, [False, False, True ]) - """, +""", """ - def greater_than( - x: Tensor, y: Tensor, name: str | None = None, *, out: Tensor | None = None - ) -> Tensor - """, +def greater_than( + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) add_doc_and_signature( "sin", - """ + r""" Sine Activation Operator. .. math:: @@ -1997,17 +2231,20 @@ def greater_than( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [-0.38941833, -0.19866933, 0.09983342, 0.29552022]) - """, +""", """ def sin( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor - """, +""", ) add_doc_and_signature( "sign", - """ + r""" Returns sign of every element in `x`: For real numbers, 1 for positive, -1 for negative and 0 for zero. For complex numbers, the return value is a complex number with unit magnitude. If a complex number element is zero, the result is 0+0j. Args: @@ -2028,12 +2265,15 @@ def sin( >>> out Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [ 1., 0., -1., 1.]) - """, +""", """ def sign( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor - """, +""", ) add_doc_and_signature( @@ -2066,17 +2306,20 @@ def sign( Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True, [[0.69314718, 1.09861231, 1.38629436], [1.94591010, 2.07944155, 2.19722462]]) - """, +""", """ def log( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor - """, +""", ) add_doc_and_signature( "rsqrt", - """ + r""" Rsqrt Activation Operator. Please make sure input is legal in case of numeric errors. @@ -2104,17 +2347,20 @@ def log( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [3.16227770, 2.23606801, 1.82574177, 1.58113885]) - """, +""", """ def rsqrt( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor - """, +""", ) add_doc_and_signature( "cos", - """ + r""" Cosine Operator. Computes cosine of x element-wise. Input range is `(-inf, inf)` and output range is `[-1,1]`. @@ -2142,17 +2388,20 @@ def rsqrt( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [0.92106098, 0.98006660, 0.99500418, 0.95533651]) - """, +""", """ def cos( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor - """, +""", ) add_doc_and_signature( "cosh", - """ + r""" Cosh Activation Operator. Input range `(-inf, inf)`, output range `(1, inf)`. @@ -2180,13 +2429,20 @@ def cos( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [1.08107233, 1.02006674, 1.00500417, 1.04533851]) - """, - "def cosh(x: Tensor, name: str | None = None, *, out: Tensor | None = None) -> Tensor", +""", + """ +def cosh( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) add_doc_and_signature( "floor", - """ + r""" Floor Activation Operator. Computes floor of x element-wise. .. math:: @@ -2212,17 +2468,20 @@ def cos( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [-1., -1., 0., 0.]) - """, +""", """ def floor( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor - """, +""", ) # hehongyu add_doc_and_signature( "maximum", - """ + r""" Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is: .. math:: @@ -2277,21 +2536,21 @@ def floor( >>> print(res) Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, [5. , 3. , inf.]) - """, +""", """ - def maximum( - x: Tensor, - y: Tensor, - name: str | None = None, - *, - out: Tensor | None = None, - ) -> Tensor - """, +def maximum( + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) add_doc_and_signature( "minimum", - """ + r""" Compare two tensors and return a new tensor containing the element-wise minima. The equation is: .. math:: @@ -2346,21 +2605,21 @@ def maximum( >>> print(res) Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True, [ 1. , -inf., 5. ]) - """, +""", """ - def minimum( - x: Tensor, - y: Tensor, - name: str | None = None, - *, - out: Tensor | None = None, - ) -> Tensor - """, +def minimum( + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", ) add_doc_and_signature( "sqrt", - """ + r""" Sqrt Activation Operator. .. math:: @@ -2385,7 +2644,7 @@ def minimum( >>> print(out) Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, [0.31622776, 0.44721359, 0.54772258, 0.63245553]) - """, +""", """ def sqrt( x: Tensor, @@ -2393,7 +2652,7 @@ def sqrt( *, out: Tensor | None = None, ) -> Tensor - """, +""", ) # lousiyu @@ -2458,7 +2717,7 @@ def sqrt( [[0 , 0 , 0 , 0 ], [5 , 0 , 0 , 0 ], [9 , 10, 0 , 0 ]]) - """, +""", """ def tril( x: Tensor, @@ -2531,8 +2790,7 @@ def tril( [[1 , 2 , 3 , 4 ], [5 , 6 , 7 , 8 ], [0 , 10, 11, 12]]) - - """, +""", """ def triu( x: Tensor, @@ -2546,7 +2804,7 @@ def triu( add_doc_and_signature( "bmm", - """ + r""" Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dimensional and share the same batch size. @@ -2583,8 +2841,7 @@ def triu( [12., 12.]], [[45., 45.], [60., 60.]]]) - - """, +""", """ def bmm( x: Tensor, @@ -2643,14 +2900,18 @@ def bmm( """, """ def logical_and( - x: Tensor, y: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) add_doc_and_signature( "logical_or", - """ + r""" ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``. Each element of ``out`` is calculated by @@ -2694,14 +2955,18 @@ def logical_and( """, """ def logical_or( - x: Tensor, y: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) add_doc_and_signature( "logical_not", - """ + r""" ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``out`` is N-dim boolean ``Variable``. Each element of ``out`` is calculated by @@ -2740,7 +3005,10 @@ def logical_or( """, """ def logical_not( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) @@ -2791,14 +3059,18 @@ def logical_not( """, """ def logical_xor( - x: Tensor, y: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) add_doc_and_signature( "dot", - """ + r""" This operator calculates inner product for vectors. Note: @@ -2847,7 +3119,11 @@ def logical_xor( """, """ def dot( - x: Tensor, y: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) @@ -2888,14 +3164,17 @@ def dot( """, """ def tanh( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) add_doc_and_signature( "exp", - """ + r""" Computes exp of x element-wise with a natural number `e` as the base. @@ -2928,14 +3207,17 @@ def tanh( """, """ def exp( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) add_doc_and_signature( "expm1", - """ + r""" Expm1 Operator. Computes expm1 of x element-wise with a natural number :math:`e` as the base. @@ -2968,13 +3250,16 @@ def exp( """, """ def expm1( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) add_doc_and_signature( "diag", - """ + r""" If ``x`` is a vector (1-D tensor), a 2-D square tensor with the elements of ``x`` as the diagonal is returned. If ``x`` is a matrix (2-D tensor), a 1-D tensor with the diagonal elements of ``x`` is returned. @@ -3083,7 +3368,7 @@ def diag( ) add_doc_and_signature( "diagonal", - """ + r""" Computes the diagonals of the input tensor x. @@ -3167,7 +3452,7 @@ def diagonal( add_doc_and_signature( "round", - """ + r""" Round the values in the input to the nearest integer value. @@ -3208,14 +3493,18 @@ def diagonal( """, """ def round( - x: Tensor, decimals: int = 0, name: str | None = None, *, out: Tensor | None = None, + x: Tensor, + decimals: int = 0, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) add_doc_and_signature( "abs", - """ + r""" Perform elementwise abs for input `x`. .. math:: @@ -3248,7 +3537,10 @@ def round( """, """ def abs( - x: Tensor, name: str | None = None, *, out: Tensor | None = None + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) @@ -3260,99 +3552,182 @@ def abs( # other add_doc_and_signature( - "asin", - f""" - Arcsine Operator. - - .. math:: - out = sin^{-1}(x) + "nextafter", + r""" + Return the next floating-point value after input towards other, elementwise. + The shapes of input and other must be broadcastable. Args: - x (Tensor): Input of Asin operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, - uint8, int8, int16, int32, int64, complex64 or complex128. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + x (Tensor): An N-D Tensor, the data type is float32, float64. + y (Tensor): An N-D Tensor, the data type is float32, float64. + name(str, optional):Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. Returns: - Tensor. Same shape and data type as input (integer types are autocasted into float32) + out (Tensor): An N-D Tensor, the shape and data type is the same with input. Examples: .. code-block:: python >>> import paddle - - >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) - >>> out = paddle.asin(x) - >>> print(out) - Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - [-0.41151685, -0.20135793, 0.10016742, 0.30469266]) - """, + >>> out = paddle.nextafter(paddle.to_tensor([1.0,2.0]),paddle.to_tensor([2.0,1.0])) + >>> out + Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True, + [1.00000012, 1.99999988]) +""", """ -def asin( +def nextafter( x: Tensor, - name: str | None = None + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, ) -> Tensor """, ) - add_doc_and_signature( - "allclose", + "angle", r""" - Check if all :math:`x` and :math:`y` satisfy the condition: + Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while + for negative real numbers, the angle is :math:`\pi`, and NaNs are propagated. - .. math:: - \left| x - y \right| \leq atol + rtol \times \left| y \right| + Equation: + .. math:: - elementwise, for all elements of :math:`x` and :math:`y`. This is analogous to :math:`numpy.allclose`, namely that it returns :math:`True` if - two tensors are elementwise equal within a tolerance. + angle(x)=arctan2(x.imag, x.real) Args: - x (Tensor): The input tensor, it's data type should be float16, float32, float64. - Alias: ``input``. - y (Tensor): The input tensor, it's data type should be float16, float32, float64. - Alias: ``other``. - rtol (float, optional): The relative tolerance. Default: :math:`1e-5` . - atol (float, optional): The absolute tolerance. Default: :math:`1e-8` . - equal_nan (bool, optional): ${equal_nan_comment}. Default: False. - name (str|None, optional): Name for the operation. For more information, please - refer to :ref:`api_guide_Name`. Default: None. + x (Tensor): An N-D Tensor, the data type is complex64, complex128, or float32, float64 . + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. Returns: - Tensor: The output tensor, it's data type is bool. + Tensor: An N-D Tensor of real data type with the same precision as that of x's data type. Examples: - .. code-block:: python + .. code-block:: pycon >>> import paddle - >>> x = paddle.to_tensor([10000., 1e-07]) - >>> y = paddle.to_tensor([10000.1, 1e-08]) - >>> result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan") - >>> print(result1) - Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, - False) - >>> result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan") - >>> print(result2) - Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, - False) - >>> x = paddle.to_tensor([1.0, float('nan')]) - >>> y = paddle.to_tensor([1.0, float('nan')]) - >>> result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan") - >>> print(result1) - Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, - False) - >>> result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan") - >>> print(result2) - Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, - True) - """, + >>> x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32') + >>> y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32') + >>> z = x + 1j * y + >>> z + Tensor(shape=[4, 4], dtype=complex64, place=Place(cpu), stop_gradient=True, + [[(-2.00000000-2.00000000j), (-2.00000000-1.00000000j), + (-2.00000000+0.00000000j), (-2.00000000+1.00000000j)], + [(-1.00000000-2.00000000j), (-1.00000000-1.00000000j), + (-1.00000000+0.00000000j), (-1.00000000+1.00000000j)], + [(0.00000000-2.00000000j) , (0.00000000-1.00000000j) , + (0.00000000+0.00000000j), (0.00000000+1.00000000j)], + [ (1.00000000-2.00000000j), (1.00000000-1.00000000j), + (1.00000000+0.00000000j), (1.00000000+1.00000000j)]]) + + >>> theta = paddle.angle(z) + >>> theta + Tensor(shape=[4, 4], dtype=float32, place=Place(cpu), stop_gradient=True, + [[-2.35619450, -2.67794514, 3.14159274, 2.67794514], + [-2.03444386, -2.35619450, 3.14159274, 2.35619450], + [-1.57079637, -1.57079637, 0. , 1.57079637], + [-1.10714877, -0.78539819, 0. , 0.78539819]]) +""", """ -def allclose( +def angle( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "heaviside", + r""" + Computes the Heaviside step function determined by corresponding element in y for each element in x. The equation is + + .. math:: + heaviside(x, y)= + \left\{ + \begin{array}{lcl} + 0,& &\text{if} \ x < 0, \\ + y,& &\text{if} \ x = 0, \\ + 1,& &\text{if} \ x > 0. + \end{array} + \right. + + Note: + ``paddle.heaviside`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . + + .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor + + Args: + x (Tensor): The input tensor of Heaviside step function, it's data type should be bfloat16, float16, float32, float64, int32 or int64. + y (Tensor): The tensor that determines a Heaviside step function, it's data type should be bfloat16, float16, float32, float64, int32 or int64. + name (str|None, optional): Name for the operation (optional, default is None). Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. + out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None. + + Returns: + N-D Tensor. A location into which the result is stored. If x and y have different shapes and are broadcastable, the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y. + + Examples: + .. code-block:: python + + >>> import paddle + >>> x = paddle.to_tensor([-0.5, 0, 0.5]) + >>> y = paddle.to_tensor([0.1]) + >>> paddle.heaviside(x, y) + Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, + [0. , 0.10000000, 1. ]) + >>> x = paddle.to_tensor([[-0.5, 0, 0.5], [-0.5, 0.5, 0]]) + >>> y = paddle.to_tensor([0.1, 0.2, 0.3]) + >>> paddle.heaviside(x, y) + Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True, + [[0. , 0.20000000, 1. ], + [0. , 1. , 0.30000001]]) +""", + """ +def heaviside( x: Tensor, y: Tensor, - rtol: float = 1e-05, - atol: float = 1e-08, - equal_nan: bool = False, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "asin", + r""" + Arcsine Operator. + + .. math:: + out = sin^{-1}(x) + + Args: + x (Tensor): Input of Asin operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, + uint8, int8, int16, int32, int64, complex64 or complex128. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Returns: + Tensor. Same shape and data type as input (integer types are autocasted into float32) + + Examples: + .. code-block:: python + + >>> import paddle + + >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + >>> out = paddle.asin(x) + >>> print(out) + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [-0.41151685, -0.20135793, 0.10016742, 0.30469266]) +""", + """ +def asin( + x: Tensor, name: str | None = None ) -> Tensor """, @@ -3398,7 +3773,7 @@ def allclose( [10.50000000, 10.50000000]], [[10.50000000, 10.50000000], [10.50000000, 10.50000000]]]) - """, +""", """ def baddbmm( input: Tensor, @@ -3407,6 +3782,303 @@ def baddbmm( beta: float = 1.0, alpha: float = 1.0, out_dtype: paddle.dtype | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "allclose", + r""" + Check if all :math:`x` and :math:`y` satisfy the condition: + + .. math:: + \left| x - y \right| \leq atol + rtol \times \left| y \right| + + elementwise, for all elements of :math:`x` and :math:`y`. This is analogous to :math:`numpy.allclose`, namely that it returns :math:`True` if + two tensors are elementwise equal within a tolerance. + + Args: + x (Tensor): The input tensor, it's data type should be float16, float32, float64. + Alias: ``input``. + y (Tensor): The input tensor, it's data type should be float16, float32, float64. + Alias: ``other``. + rtol (float, optional): The relative tolerance. Default: :math:`1e-5` . + atol (float, optional): The absolute tolerance. Default: :math:`1e-8` . + equal_nan (bool, optional): ${equal_nan_comment}. Default: False. + name (str|None, optional): Name for the operation. For more information, please + refer to :ref:`api_guide_Name`. Default: None. + + Returns: + Tensor: The output tensor, it's data type is bool. + + >>> x = paddle.to_tensor([10000., 1e-07]) + >>> y = paddle.to_tensor([10000.1, 1e-08]) + >>> result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan") + >>> print(result1) + Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + False) + >>> result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan") + >>> print(result2) + Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + False) + >>> x = paddle.to_tensor([1.0, float('nan')]) + >>> y = paddle.to_tensor([1.0, float('nan')]) + >>> result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan") + >>> print(result1) + Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + False) + >>> result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan") + >>> print(result2) + Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True, + True) +""", + """ +def allclose( + x: Tensor, + y: Tensor, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, + name: str | None = None +) -> Tensor +""", +) + +add_doc_and_signature( + "fmax", + r""" + Compares elements at corresponding positions of two tensors and returns a new tensor containing maximum value of element. + If one of them is a nan value, the other value is directly returned, if both are nan values, then the first nan value is returned. + The equation is: + + .. math:: + out = fmax(x, y) + + Note: + ``paddle.fmax`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . + + .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor + + Args: + x (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. + y (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Returns: + N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y. +""", + """ +def fmax( + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "fmin", + r""" + Compares elements at corresponding positions of two tensors and returns a new tensor containing minimum value of element. + If one of them is a nan value, the other value is directly returned, if both are nan values, then the first nan value is returned. + The equation is: + + .. math:: + out = fmin(x, y) + + Note: + ``paddle.fmin`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . + + .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor + + Args: + x (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. + y (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Returns: + N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y. +""", + """ +def fmin( + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "bincount", + r""" + Computes frequency of each value in the input tensor. + + Args: + x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. + weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. + minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. + name (str|None, optional): Normally there is no need for user to set this property. + For more information, please refer to :ref:`api_guide_Name`. Default is None. + + Returns: + Tensor: The tensor of frequency. + + Examples: + .. code-block:: python + + >>> import paddle + >>> x = paddle.to_tensor([1, 2, 1, 4, 5]) + >>> result1 = paddle.bincount(x) + >>> print(result1) + Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True, + [0, 2, 1, 0, 1, 1]) + + >>> w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) + >>> result2 = paddle.bincount(x, weights=w) + >>> print(result2) + Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True, + [0. , 2.19999981, 0.40000001, 0. , 0.50000000, 0.50000000]) +""", + """ +def bincount( + x: Tensor, + weights: Tensor | None = None, + minlength: int = 0, + name: str | None = None, +) -> Tensor +""", +) + + +add_doc_and_signature( + "bitwise_and", + r""" + Apply ``bitwise_and`` on Tensor ``X`` and ``Y``. + + .. math:: + Out = X \\& Y + + Note: + ``paddle.bitwise_and`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . + + .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor + + Args: + x (Tensor): Input Tensor of ``bitwise_and``. It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. + y (Tensor): Input Tensor of ``bitwise_and``. It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. + out (Tensor|None, optional): Result of ``bitwise_and``. It is a N-D Tensor with the same data type of input Tensor. Default: None. + name (str|None, optional): The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + Tensor: Result of ``bitwise_and``. It is a N-D Tensor with the same data type of input Tensor. + + Examples: + .. code-block:: python + + >>> import paddle + >>> x = paddle.to_tensor([-5, -1, 1]) + >>> y = paddle.to_tensor([4, 2, -3]) + >>> res = paddle.bitwise_and(x, y) + >>> print(res) + Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [0, 2, 1]) +""", + """ +def bitwise_and( + x: Tensor, + y: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "bitwise_not", + r""" + Apply ``bitwise_not`` on Tensor ``X``. + + .. math:: + Out = \\sim X + + Note: + ``paddle.bitwise_not`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . + + .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor + + Args: + x (Tensor): Input Tensor of ``bitwise_not``. It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. + out (Tensor|None, optional): Result of ``bitwise_not``. It is a N-D Tensor with the same data type of input Tensor. Default: None. + name (str|None, optional): The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + Tensor: Result of ``bitwise_not``. It is a N-D Tensor with the same data type of input Tensor. + + Examples: + .. code-block:: python + + >>> import paddle + >>> x = paddle.to_tensor([-5, -1, 1]) + >>> res = paddle.bitwise_not(x) + >>> print(res) + Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [ 4, 0, -2]) +""", + """ +def bitwise_not( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + +add_doc_and_signature( + "bitwise_xor", + r""" + Apply ``bitwise_xor`` on Tensor ``X`` and ``Y``. + + .. math:: + Out = X ^\\wedge Y + + Note: + ``paddle.bitwise_xor`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . + + .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor + + Args: + x (Tensor): Input Tensor of ``bitwise_xor``. It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. + y (Tensor): Input Tensor of ``bitwise_xor``. It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. + out (Tensor|None, optional): Result of ``bitwise_xor``. It is a N-D Tensor with the same data type of input Tensor. Default: None. + name (str|None, optional): The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + Tensor: Result of ``bitwise_xor``. It is a N-D Tensor with the same data type of input Tensor. + + Examples: + .. code-block:: python + + >>> import paddle + >>> x = paddle.to_tensor([-5, -1, 1]) + >>> y = paddle.to_tensor([4, 2, -3]) + >>> res = paddle.bitwise_xor(x, y) + >>> print(res) + Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, + [-1, -3, -4]) +""", + """ +def bitwise_xor( + x: Tensor, + y: Tensor, name: str | None = None, *, out: Tensor | None = None, @@ -3443,7 +4115,7 @@ def baddbmm( Tensor(shape=[2, 3], dtype=complex64, place=Place(cpu), stop_gradient=True, [[(1-1j), (2-2j), (3-3j)], [(4-4j), (5-5j), (6-6j)]]) - """, +""", """ def conj( x: Tensor, diff --git a/python/paddle/pir/generated_methods_patch.py b/python/paddle/pir/generated_methods_patch.py index 862ff90a7c66b1..a9cff3d7d6fbee 100644 --- a/python/paddle/pir/generated_methods_patch.py +++ b/python/paddle/pir/generated_methods_patch.py @@ -12,10 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +from paddle.tensor import magic_method_func + from ..base.dygraph.generated_tensor_methods_patch import methods_map from . import Value def monkey_patch_generated_methods_for_value(): + magic_method_dict = {v: k for k, v in magic_method_func} + for method_name, method in methods_map: setattr(Value, method_name, method) + magic_name = magic_method_dict.get(method_name) + if magic_name: + setattr(Value, magic_name, method) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index a83c8867810c20..70bbade13ee5a8 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -21,8 +21,7 @@ import paddle from paddle import _C_ops -from paddle._C_ops import bmm, diagonal, dot, matmul # noqa: F401 -from paddle.base.libpaddle import DataType +from paddle._C_ops import bincount, bmm, diagonal, dot, matmul # noqa: F401 from paddle.common_ops_import import VarDesc from paddle.tensor.math import broadcast_shape from paddle.utils.decorator_utils import ( @@ -57,8 +56,6 @@ _POrder: TypeAlias = Literal['fro', 'nuc'] -__all__ = [] - # Consistent with kDefaultDim from C++ Backend K_DEFAULT_DIM = 9 @@ -2479,76 +2476,6 @@ def histogram_bin_edges( return paddle.linspace(min, max, bins + 1, name=name) -def bincount( - x: Tensor, - weights: Tensor | None = None, - minlength: int = 0, - name: str | None = None, -) -> Tensor: - """ - Computes frequency of each value in the input tensor. - - Args: - x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. - weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. - minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. - name (str|None, optional): Normally there is no need for user to set this property. - For more information, please refer to :ref:`api_guide_Name`. Default is None. - - Returns: - Tensor: The tensor of frequency. - - Examples: - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([1, 2, 1, 4, 5]) - >>> result1 = paddle.bincount(x) - >>> print(result1) - Tensor(shape=[6], dtype=int64, place=Place(cpu), stop_gradient=True, - [0, 2, 1, 0, 1, 1]) - - >>> w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) - >>> result2 = paddle.bincount(x, weights=w) - >>> print(result2) - Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True, - [0. , 2.19999981, 0.40000001, 0. , 0.50000000, 0.50000000]) - """ - if x.dtype not in [ - paddle.int32, - paddle.int64, - DataType.INT32, - DataType.INT64, - ]: - raise TypeError("Elements in Input(x) should all be integers") - - if in_dynamic_or_pir_mode(): - return _C_ops.bincount(x, weights, minlength) - else: - helper = LayerHelper('bincount', **locals()) - - check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') - - if weights is not None: - check_variable_and_dtype( - weights, - 'Weights', - ['int32', 'int64', 'float32', 'float64'], - 'bincount', - ) - out = helper.create_variable_for_type_inference(dtype=weights.dtype) - else: - out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op( - type='bincount', - inputs={'X': x, 'Weights': weights}, - outputs={'Out': out}, - attrs={'minlength': minlength}, - ) - return out - - def mv(x: Tensor, vec: Tensor, name: str | None = None) -> Tensor: """ Performs a matrix-vector product of the matrix x and the vector vec. diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 89b610a37afe85..527c093e7d7a10 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -22,6 +22,9 @@ from paddle import _C_ops from paddle._C_ops import ( # noqa: F401 allclose, + bitwise_and, + bitwise_not, + bitwise_xor, greater_than, isclose, logical_and, @@ -949,53 +952,10 @@ def _bitwise_op( return out -def bitwise_and( - x: Tensor, y: Tensor, out: Tensor | None = None, name: str | None = None -) -> Tensor: - r""" - - Apply ``bitwise_and`` on Tensor ``X`` and ``Y`` . - - .. math:: - Out = X \& Y - - Note: - ``paddle.bitwise_and`` supports broadcasting. If you want know more about broadcasting, please refer to please refer to `Introduction to Tensor`_ . - - .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor - - Args: - x (Tensor): Input Tensor of ``bitwise_and`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. - y (Tensor): Input Tensor of ``bitwise_and`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. - out (Tensor|None, optional): Result of ``bitwise_and`` . It is a N-D Tensor with the same data type of input Tensor. Default: None. - name (str|None, optional): The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor: Result of ``bitwise_and`` . It is a N-D Tensor with the same data type of input Tensor. - - Examples: - .. code-block:: python - - >>> import paddle - >>> x = paddle.to_tensor([-5, -1, 1]) - >>> y = paddle.to_tensor([4, 2, -3]) - >>> res = paddle.bitwise_and(x, y) - >>> print(res) - Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, - [0, 2, 1]) - """ - if in_dynamic_or_pir_mode() and out is None: - return _C_ops.bitwise_and(x, y) - return _bitwise_op( - op_name="bitwise_and", x=x, y=y, name=name, out=out, binary_op=True - ) - - def __rand__(x: Tensor, y: int | bool): if isinstance(y, (int, bool)): y_tensor = paddle.to_tensor(y, dtype=x.dtype) - return bitwise_and(y_tensor, x, None, None) + return bitwise_and(y_tensor, x) else: raise TypeError( f"unsupported operand type(s) for |: '{type(y).__name__}' and 'Tensor'" @@ -1060,8 +1020,8 @@ def bitwise_or( Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, [-1, -1, -3]) """ - if in_dynamic_or_pir_mode() and out is None: - return _C_ops.bitwise_or(x, y) + if in_dynamic_or_pir_mode(): + return _C_ops.bitwise_or(x, y, out=out) return _bitwise_op( op_name="bitwise_or", x=x, y=y, name=name, out=out, binary_op=True @@ -1099,49 +1059,6 @@ def bitwise_or_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor: return _C_ops.bitwise_or_(x, y) -def bitwise_xor( - x: Tensor, y: Tensor, out: Tensor | None = None, name: str | None = None -) -> Tensor: - r""" - - Apply ``bitwise_xor`` on Tensor ``X`` and ``Y`` . - - .. math:: - Out = X ^\wedge Y - - Note: - ``paddle.bitwise_xor`` supports broadcasting. If you want know more about broadcasting, please refer to please refer to `Introduction to Tensor`_ . - - .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor - - Args: - x (Tensor): Input Tensor of ``bitwise_xor`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. - y (Tensor): Input Tensor of ``bitwise_xor`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. - out (Tensor|None, optional): Result of ``bitwise_xor`` . It is a N-D Tensor with the same data type of input Tensor. Default: None. - name (str|None, optional): The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor: Result of ``bitwise_xor`` . It is a N-D Tensor with the same data type of input Tensor. - - Examples: - .. code-block:: python - - >>> import paddle - >>> x = paddle.to_tensor([-5, -1, 1]) - >>> y = paddle.to_tensor([4, 2, -3]) - >>> res = paddle.bitwise_xor(x, y) - >>> print(res) - Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, - [-1, -3, -4]) - """ - if in_dynamic_or_pir_mode() and out is None: - return _C_ops.bitwise_xor(x, y) - return _bitwise_op( - op_name="bitwise_xor", x=x, y=y, name=name, out=out, binary_op=True - ) - - def __rxor__( x: Tensor, y: int | bool, @@ -1172,48 +1089,6 @@ def bitwise_xor_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor: return _C_ops.bitwise_xor_(x, y) -def bitwise_not( - x: Tensor, out: Tensor | None = None, name: str | None = None -) -> Tensor: - r""" - - Apply ``bitwise_not`` on Tensor ``X``. - - .. math:: - Out = \sim X - - Note: - ``paddle.bitwise_not`` supports broadcasting. If you want know more about broadcasting, please refer to please refer to `Introduction to Tensor`_ . - - .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor - - Args: - x (Tensor): Input Tensor of ``bitwise_not`` . It is a N-D Tensor of bool, uint8, int8, int16, int32, int64. - out (Tensor|None, optional): Result of ``bitwise_not`` . It is a N-D Tensor with the same data type of input Tensor. Default: None. - name (str|None, optional): The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor: Result of ``bitwise_not`` . It is a N-D Tensor with the same data type of input Tensor. - - Examples: - .. code-block:: python - - >>> import paddle - >>> x = paddle.to_tensor([-5, -1, 1]) - >>> res = paddle.bitwise_not(x) - >>> print(res) - Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True, - [ 4, 0, -2]) - """ - if in_dynamic_or_pir_mode() and out is None: - return _C_ops.bitwise_not(x) - - return _bitwise_op( - op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False - ) - - @inplace_apis_in_dygraph_only def bitwise_not_(x: Tensor, name: str | None = None) -> Tensor: r""" diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index ef4c695bb85262..c55104fc2242f0 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -27,9 +27,13 @@ all, amax, amin, + angle, any, baddbmm, conj, + fmax, + fmin, + heaviside, isfinite, isinf, isnan, @@ -41,6 +45,7 @@ maximum, minimum, multiply, + nextafter, sign, sin, sum, @@ -72,7 +77,6 @@ in_dynamic_or_pir_mode, in_pir_mode, ) -from .creation import _complex_to_real_dtype from .layer_function_generator import generate_layer_fn from .manipulation import cast, cast_ from .ops import ( # noqa: F401 @@ -1425,134 +1429,6 @@ def _divide_with_axis(x, y, axis=-1, name=None): return _elementwise_op(LayerHelper(op_type, **locals())) -def fmax(x: Tensor, y: Tensor, name: str | None = None) -> Tensor: - """ - Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the maximum value of the element. - If one of them is a nan value, the other value is directly returned, if both are nan values, then the first nan value is returned. - The equation is: - - .. math:: - out = fmax(x, y) - - Note: - ``paddle.fmax`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . - - .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor - - Args: - x (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. - y (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y. - - Examples: - - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([[1, 2], [7, 8]]) - >>> y = paddle.to_tensor([[3, 4], [5, 6]]) - >>> res = paddle.fmax(x, y) - >>> print(res) - Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True, - [[3, 4], - [7, 8]]) - - >>> x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]]) - >>> y = paddle.to_tensor([3, 0, 4]) - >>> res = paddle.fmax(x, y) - >>> print(res) - Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True, - [[3, 2, 4], - [3, 2, 4]]) - - >>> x = paddle.to_tensor([2, 3, 5], dtype='float32') - >>> y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32') - >>> res = paddle.fmax(x, y) - >>> print(res) - Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, - [2., 3., 5.]) - - >>> x = paddle.to_tensor([5, 3, float("inf")], dtype='float32') - >>> y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32') - >>> res = paddle.fmax(x, y) - >>> print(res) - Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, - [5. , 3. , inf.]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.fmax(x, y) - else: - return _elementwise_op(LayerHelper('elementwise_fmax', **locals())) - - -def fmin(x: Tensor, y: Tensor, name: str | None = None) -> Tensor: - """ - Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the minimum value of the element. - If one of them is a nan value, the other value is directly returned, if both are nan values, then the first nan value is returned. - The equation is: - - .. math:: - out = fmin(x, y) - - Note: - ``paddle.fmin`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . - - .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor - - Args: - x (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. - y (Tensor): the input tensor, it's data type should be bfloat16, float16, float32, float64, int32, int64. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y. - - Examples: - - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([[1, 2], [7, 8]]) - >>> y = paddle.to_tensor([[3, 4], [5, 6]]) - >>> res = paddle.fmin(x, y) - >>> print(res) - Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True, - [[1, 2], - [5, 6]]) - - >>> x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]]) - >>> y = paddle.to_tensor([3, 0, 4]) - >>> res = paddle.fmin(x, y) - >>> print(res) - Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True, - [[[1, 0, 3], - [1, 0, 3]]]) - - >>> x = paddle.to_tensor([2, 3, 5], dtype='float32') - >>> y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32') - >>> res = paddle.fmin(x, y) - >>> print(res) - Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, - [1., 3., 5.]) - - >>> x = paddle.to_tensor([5, 3, float("inf")], dtype='float64') - >>> y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64') - >>> res = paddle.fmin(x, y) - >>> print(res) - Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True, - [ 1. , -inf., 5. ]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.fmin(x, y) - else: - return _elementwise_op(LayerHelper('elementwise_fmin', **locals())) - - def reduce_as(x: Tensor, target: Tensor, name: str | None = None) -> Tensor: """ Computes the sum of tensor elements make the shape of its result equal to the shape of target. @@ -3335,14 +3211,14 @@ def clip_( Inplace version of ``clip`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_paddle_clip`. """ - fmin = float(np.finfo(np.float32).min) - fmax = float(np.finfo(np.float32).max) + min_ = float(np.finfo(np.float32).min) + max_ = float(np.finfo(np.float32).max) if isinstance(min, Variable): min = min.item(0) if isinstance(max, Variable): max = max.item(0) - min = fmin if min is None else min - max = fmax if max is None else max + min = min_ if min is None else min + max = max_ if max is None else max if in_dynamic_mode(): return _C_ops.clip_(x, min, max) @@ -5684,128 +5560,6 @@ def _diff_handler( return last_out -def angle(x: Tensor, name: str | None = None) -> Tensor: - r""" - Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while - for negative real numbers, the angle is :math:`\pi`, and NaNs are propagated.. - - Equation: - .. math:: - - angle(x)=arctan2(x.imag, x.real) - - Args: - x (Tensor): An N-D Tensor, the data type is complex64, complex128, or float32, float64 . - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor: An N-D Tensor of real data type with the same precision as that of x's data type. - - Examples: - .. code-block:: pycon - - >>> import paddle - - >>> x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32') - >>> y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32') - >>> z = x + 1j * y - >>> z - Tensor(shape=[4, 4], dtype=complex64, place=Place(cpu), stop_gradient=True, - [[(-2.00000000-2.00000000j), (-2.00000000-1.00000000j), - (-2.00000000+0.00000000j), (-2.00000000+1.00000000j)], - [(-1.00000000-2.00000000j), (-1.00000000-1.00000000j), - (-1.00000000+0.00000000j), (-1.00000000+1.00000000j)], - [(0.00000000-2.00000000j) , (0.00000000-1.00000000j) , - (0.00000000+0.00000000j), (0.00000000+1.00000000j)], - [ (1.00000000-2.00000000j), (1.00000000-1.00000000j), - (1.00000000+0.00000000j), (1.00000000+1.00000000j)]]) - - >>> theta = paddle.angle(z) - >>> theta - Tensor(shape=[4, 4], dtype=float32, place=Place(cpu), stop_gradient=True, - [[-2.35619450, -2.67794514, 3.14159274, 2.67794514], - [-2.03444386, -2.35619450, 3.14159274, 2.35619450], - [-1.57079637, -1.57079637, 0. , 1.57079637], - [-1.10714877, -0.78539819, 0. , 0.78539819]]) - """ - - if in_dynamic_or_pir_mode(): - return _C_ops.angle(x) - else: - check_variable_and_dtype( - x, - 'x', - [ - 'float16', - 'float32', - 'float64', - 'complex64', - 'complex128', - 'uint16', - ], - 'angle', - ) - op_type = "angle" - helper = LayerHelper(op_type, **locals()) - inputs = {"X": x} - out = helper.create_variable_for_type_inference( - dtype=_complex_to_real_dtype(x.dtype) - ) - outputs = {"Out": out} - helper.append_op(type=op_type, inputs=inputs, outputs=outputs) - return out - - -def heaviside(x: Tensor, y: Tensor, name: str | None = None) -> Tensor: - r""" - Computes the Heaviside step function determined by corresponding element in y for each element in x. The equation is - - .. math:: - heaviside(x, y)= - \left\{ - \begin{array}{lcl} - 0,& &\text{if} \ x < 0, \\ - y,& &\text{if} \ x = 0, \\ - 1,& &\text{if} \ x > 0. - \end{array} - \right. - - Note: - ``paddle.heaviside`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ . - - .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor - - Args: - x (Tensor): The input tensor of Heaviside step function, it's data type should be bfloat16, float16, float32, float64, int32 or int64. - y (Tensor): The tensor that determines a Heaviside step function, it's data type should be bfloat16, float16, float32, float64, int32 or int64. - name (str|None, optional): Name for the operation (optional, default is None). Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. - - Returns: - N-D Tensor. A location into which the result is stored. If x and y have different shapes and are broadcastable, the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y. - - Examples: - .. code-block:: python - - >>> import paddle - >>> x = paddle.to_tensor([-0.5, 0, 0.5]) - >>> y = paddle.to_tensor([0.1]) - >>> paddle.heaviside(x, y) - Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, - [0. , 0.10000000, 1. ]) - >>> x = paddle.to_tensor([[-0.5, 0, 0.5], [-0.5, 0.5, 0]]) - >>> y = paddle.to_tensor([0.1, 0.2, 0.3]) - >>> paddle.heaviside(x, y) - Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True, - [[0. , 0.20000000, 1. ], - [0. , 1. , 0.30000001]]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.heaviside(x, y) - else: - op_type = 'elementwise_heaviside' - return _elementwise_op(LayerHelper(op_type, **locals())) - - @param_one_alias(["x", "input"]) def frac( x: Tensor, name: str | None = None, *, out: Tensor | None = None @@ -6434,42 +6188,6 @@ def vander( return res -def nextafter(x: Tensor, y: Tensor, name: str | None = None) -> Tensor: - r""" - Return the next floating-point value after input towards other, elementwise. - The shapes of input and other must be broadcastable. - - Args: - x (Tensor): An N-D Tensor, the data type is float32, float64. - y (Tensor): An N-D Tensor, the data type is float32, float64. - name(str, optional):Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - out (Tensor): An N-D Tensor, the shape and data type is the same with input. - - Examples: - .. code-block:: python - - >>> import paddle - >>> out = paddle.nextafter(paddle.to_tensor([1.0,2.0]),paddle.to_tensor([2.0,1.0])) - >>> out - Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True, - [1.00000012, 1.99999988]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.nextafter(x, y) - else: - check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'nextafter') - check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'nextafter') - op_type = "nextafter" - helper = LayerHelper(op_type, **locals()) - inputs = {"x": x, "y": y} - out = helper.create_variable_for_type_inference(dtype=paddle.float32) - outputs = {"out": out} - helper.append_op(type=op_type, inputs=inputs, outputs=outputs) - return out - - def i0(x: Tensor, name: str | None = None) -> Tensor: r""" The function used to calculate modified bessel function of order 0. diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py index a9c06b4f26780b..c5dfa986087fe4 100644 --- a/python/paddle/tensor/ops.py +++ b/python/paddle/tensor/ops.py @@ -20,6 +20,8 @@ acos, acosh, asin, + asinh, + atan, atanh, ceil, cos, @@ -27,18 +29,20 @@ exp, expm1, floor, + reciprocal, round, rsqrt, sigmoid, sin, sinh, sqrt, + square, + tan, ) from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only from .. import _C_ops -from ..base.data_feeder import check_variable_and_dtype -from ..framework import LayerHelper, in_dynamic_or_pir_mode +from ..framework import in_dynamic_or_pir_mode from .layer_function_generator import ( generate_inplace_fn, generate_layer_fn, @@ -87,282 +91,15 @@ globals()[_OP] = _func -def asinh(x: Tensor, name: str | None = None) -> Tensor: - """ - Asinh Activation Operator. - - .. math:: - out = asinh(x) - - Args: - x (Tensor): Input of Asinh operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, - uint8, int8, int16, int32, int64, complex64 or complex128. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor. Output of Asinh operator, a Tensor with shape same as input - (integer types are autocasted into float32). - - Examples: - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) - >>> out = paddle.asinh(x) - >>> print(out) - Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - [-0.39003533, -0.19869010, 0.09983408, 0.29567307]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.asinh(x) - else: - check_variable_and_dtype( - x, - 'x', - [ - 'float16', - 'uint16', - 'float32', - 'float64', - 'uint8', - 'int8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - ], - 'asinh', - ) - helper = LayerHelper('asinh', **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out}) - return out - - -def atan(x: Tensor, name: str | None = None) -> Tensor: - """ - Arctangent Operator. - - .. math:: - out = tan^{-1}(x) - - Args: - x (Tensor): Input of Atan operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, - uint8, int8, int16, int32, int64, complex64 or complex128. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor. Same shape and dtype as input x - (integer types are autocasted into float32). - - Examples: - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) - >>> out = paddle.atan(x) - >>> print(out) - Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - [-0.38050640, -0.19739556, 0.09966865, 0.29145682]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.atan(x) - else: - check_variable_and_dtype( - x, - 'x', - [ - 'float16', - 'uint16', - 'float32', - 'float64', - 'uint8', - 'int8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - ], - 'atan', - ) - helper = LayerHelper('atan', **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out}) - return out - - -def reciprocal(x: Tensor, name: str | None = None) -> Tensor: - """ - - Reciprocal Activation Operator. - - .. math:: - out = \\frac{1}{x} - - Args: - x (Tensor): Input of Reciprocal operator, an N-D Tensor, with data type float32, float64, float16, bfloat16, - uint8, int8, int16, int32, int64. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor. Output of Reciprocal operator, a Tensor with shape same as input - (integer types are autocasted into float32). - - Examples: - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) - >>> out = paddle.reciprocal(x) - >>> print(out) - Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - [-2.50000000, -5. , 10. , 3.33333325]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.reciprocal(x) - else: - check_variable_and_dtype( - x, - 'x', - [ - 'float16', - 'uint16', - 'float32', - 'float64', - 'uint8', - 'int8', - 'int16', - 'int32', - 'int64', - ], - 'reciprocal', - ) - helper = LayerHelper('reciprocal', **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op( - type='reciprocal', inputs={"X": x}, outputs={"Out": out} - ) - return out - - @inplace_apis_in_dygraph_only def round_(x, decimals=0, name=None): r""" - Inplace version of ``round`` API, the output Tensor will be inplaced with input ``x``. + Inplace version of ``round`` API, output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_paddle_round`. """ return _C_ops.round_(x, decimals) -def square(x: Tensor, name: str | None = None) -> Tensor: - """ - Square each elements of the inputs. - - .. math:: - out = x^2 - - Args: - x (Tensor): Input of Square operator, an N-D Tensor, with data type int32, int64, float32, float64, float16, complex64 or complex128. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor. Output of Square operator, a Tensor with shape same as input. - - Examples: - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) - >>> out = paddle.square(x) - >>> print(out) - Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - [0.16000001, 0.04000000, 0.01000000, 0.09000000]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.square(x) - else: - check_variable_and_dtype( - x, - 'x', - [ - 'int32', - 'int64', - 'float16', - 'float32', - 'float64', - 'complex64', - 'complex128', - ], - 'square', - ) - helper = LayerHelper('square', **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out}) - return out - - -def tan(x: Tensor, name: str | None = None) -> Tensor: - """ - Tangent Operator. Computes tangent of x element-wise. - - Input range is `(k*pi-pi/2, k*pi+pi/2)` and output range is `(-inf, inf)`. - - .. math:: - out = tan(x) - - Args: - x (Tensor): Input of Tan operator, an N-D Tensor, with data type float32, float64, float16, - bfloat16, uint8, int8, int16, int32, int64, complex64 or complex128. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor. Output of Tan operator, a Tensor with shape same as input - (integer types are autocasted into float32). - - Examples: - .. code-block:: python - - >>> import paddle - - >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) - >>> out = paddle.tan(x) - >>> print(out) - Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - [-0.42279324, -0.20271003, 0.10033467, 0.30933627]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.tan(x) - else: - check_variable_and_dtype( - x, - 'x', - [ - 'float16', - 'uint16', - 'float32', - 'float64', - 'uint8', - 'int8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - ], - 'tan', - ) - helper = LayerHelper('tan', **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out}) - return out - - def erf(x: Tensor, name: str | None = None) -> Tensor: r""" The error function. @@ -377,7 +114,7 @@ def erf(x: Tensor, name: str | None = None) -> Tensor: name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: - Tensor. The output of Erf, dtype: float32 or float64 (integer types are autocasted into float32), shape: the same as the input. + Tensor. The output of Erf, dtype: float32 or float64 (integer types are autocasted into float32), shape: same as input. Examples: diff --git a/test/auto_parallel/hybrid_strategy/semi_auto_parallel_mutual_load_between_dynamic_and_static.py b/test/auto_parallel/hybrid_strategy/semi_auto_parallel_mutual_load_between_dynamic_and_static.py index c548c962e4f49b..422604b2090d1d 100644 --- a/test/auto_parallel/hybrid_strategy/semi_auto_parallel_mutual_load_between_dynamic_and_static.py +++ b/test/auto_parallel/hybrid_strategy/semi_auto_parallel_mutual_load_between_dynamic_and_static.py @@ -47,7 +47,8 @@ def create_data_loader(self): return loader def run_dynamic(self, layer, opt, data_loader, is_recompute=False): - loss_fn = nn.MSELoss() + # MSELoss only support pir, but test_save_load_state_dict.py set FLAGS_enable_pir_api=0 + loss_fn = nn.SmoothL1Loss() loss_list = [] for _ in range(5): @@ -65,7 +66,8 @@ def run_dynamic(self, layer, opt, data_loader, is_recompute=False): def run_dy2static(self, layer, opt, data_loader): # create loss - loss_fn = nn.MSELoss() + # MSELoss only support pir, but test_save_load_state_dict.py set FLAGS_enable_pir_api=0 + loss_fn = nn.SmoothL1Loss() dist_loader = dist.shard_dataloader( dataloader=data_loader, meshes=[self.mesh], @@ -114,8 +116,8 @@ def test_dygraph_save_static_load(self): dy2static_opt = paddle.optimizer.SGD( learning_rate=0.1, parameters=dy_layer.parameters() ) - - loss_fn = nn.MSELoss() + # MSELoss only support pir, but test_save_load_state_dict.py set FLAGS_enable_pir_api=0 + loss_fn = nn.SmoothL1Loss() dist_model = dist.to_static( dy_layer, dist_loader, loss_fn, dy2static_opt ) diff --git a/test/auto_parallel/hybrid_strategy/test_save_load_state_dict.py b/test/auto_parallel/hybrid_strategy/test_save_load_state_dict.py index 09f3d77974027d..87236e8fc10e57 100644 --- a/test/auto_parallel/hybrid_strategy/test_save_load_state_dict.py +++ b/test/auto_parallel/hybrid_strategy/test_save_load_state_dict.py @@ -18,6 +18,7 @@ import collective.test_communication_api_base as test_base +# should be set to FLAGS_enable_pir_api=1 os.environ['FLAGS_enable_pir_api'] = '0' diff --git a/test/auto_parallel/semi_auto_parallel_simple_net.py b/test/auto_parallel/semi_auto_parallel_simple_net.py index d096f326428e3a..abf59b37c43ed9 100644 --- a/test/auto_parallel/semi_auto_parallel_simple_net.py +++ b/test/auto_parallel/semi_auto_parallel_simple_net.py @@ -145,7 +145,8 @@ def init_input_data(self): def run_dynamic(self, layer, shard_input=False, is_pp=False): # create loss - loss_fn = nn.MSELoss() + # MSELoss only support pir, but test_save_load_state_dict.py set FLAGS_enable_pir_api=0 + loss_fn = nn.SmoothL1Loss() # run forward and backward if is_pp: input_dist_attr = (self._pp_mesh0, [Shard(0)]) diff --git a/test/auto_parallel/test_api_dist_branch.py b/test/auto_parallel/test_api_dist_branch.py index 008067c56f3171..c15f941c4aae7f 100644 --- a/test/auto_parallel/test_api_dist_branch.py +++ b/test/auto_parallel/test_api_dist_branch.py @@ -146,6 +146,9 @@ def test_bincount_api_for_dist_tensor(self): local_out = paddle.bincount(local_x, weights=local_weight) dist_out = paddle.bincount(dist_x, weights=dist_weight) + # add keywords usage case after fix code gen bug + # dist_out = paddle.bincount(dist_x, weights=dist_weight) + dist_out = paddle.bincount(dist_x, dist_weight) self.check_tensor_eq(local_out, dist_out) diff --git a/test/legacy_test/test_api_compatibility.py b/test/legacy_test/test_api_compatibility.py new file mode 100644 index 00000000000000..92c3f0a98dd9b0 --- /dev/null +++ b/test/legacy_test/test_api_compatibility.py @@ -0,0 +1,1197 @@ +# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +import paddle + + +# Edit By AI Agent +# Test nextafter compatibility +class TestNextafterAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randint(0, 8, self.shape).astype(self.dtype) + self.np_y = np.random.randint(0, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + y = paddle.to_tensor(self.np_y) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.nextafter(x, y) + paddle_dygraph_out.append(out1) + + # Paddle keyword args (kwargs) + out2 = paddle.nextafter(x=x, y=y) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.nextafter(input=x, other=y) + paddle_dygraph_out.append(out3) + + # Tensor method - args + out4 = paddle.empty([]) + out5 = x.nextafter(y, out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - kwargs + out6 = x.nextafter(y=y) + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.nextafter(x, y, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.nextafter(self.np_x, self.np_y) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + y = paddle.static.data(name="y", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.nextafter(x, y) + # Paddle keyword args + out2 = paddle.nextafter(x=x, y=y) + # Torch keyword args + out3 = paddle.nextafter(input=x, other=y) + # Tensor method + out4 = x.nextafter(y) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "y": self.np_y}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.nextafter(self.np_x, self.np_y) + for out in fetches: + np.testing.assert_allclose(out, ref_out) + + +# Test angle compatibility +class TestAngleAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'complex64' + self.init_data() + + def init_data(self): + self.np_x_real = np.random.randn(*self.shape).astype('float32') + self.np_x_imag = np.random.randn(*self.shape).astype('float32') + self.np_x = self.np_x_real + 1j * self.np_x_imag + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.angle(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword args (kwargs) + out2 = paddle.angle(x=x) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.angle(input=x) + paddle_dygraph_out.append(out3) + + # Tensor method - args + out4 = paddle.empty([]) + out5 = x.angle(out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - kwargs + out6 = x.angle() + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.angle(x, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.angle(self.np_x) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose( + ref_out, out.numpy(), rtol=1e-5, atol=1e-5 + ) + paddle.enable_static() + + +# Edit by AI Agent +# Test atan compatibility +class TestAtanAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randn(*self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + paddle_dygraph_out = [] + + # Position args + out1 = paddle.atan(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword args + out2 = paddle.atan(x=x) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.atan(input=x) + paddle_dygraph_out.append(out3) + + # Tensor method - args + out4 = paddle.empty([]) + out5 = x.atan(out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - kwargs + out6 = x.atan() + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.atan(x, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.arctan(self.np_x) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy(), rtol=1e-6) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.atan(x) + # Paddle keyword args + out2 = paddle.atan(x=x) + # Torch keyword args + out3 = paddle.atan(input=x) + # Tensor method + out4 = x.atan() + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.arctan(self.np_x) + for out in fetches: + np.testing.assert_allclose(out, ref_out, rtol=1e-6) + + +# Edit by AI Agent +# Test fmax compatibility +class TestFmaxAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randn(*self.shape).astype(self.dtype) + self.np_y = np.random.randn(*self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + y = paddle.to_tensor(self.np_y) + paddle_dygraph_out = [] + + # Position args + out1 = paddle.fmax(x, y) + paddle_dygraph_out.append(out1) + + # Paddle keyword args + out2 = paddle.fmax(x=x, y=y) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.fmax(input=x, other=y) + paddle_dygraph_out.append(out3) + + # Test out parameter + out4 = paddle.empty([]) + paddle.fmax(x, y, out=out4) + paddle_dygraph_out.append(out4) + + # Numpy reference output + ref_out = np.fmax(self.np_x, self.np_y) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + y = paddle.static.data(name="y", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.fmax(x, y) + # Paddle keyword args + out2 = paddle.fmax(x=x, y=y) + # Torch keyword args + out3 = paddle.fmax(input=x, other=y) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "y": self.np_y}, + fetch_list=[out1, out2, out3], + ) + ref_out = np.fmax(self.np_x, self.np_y) + for out in fetches: + np.testing.assert_allclose(out, ref_out) + + +# Edit by AI Agent +# Test fmin compatibility +class TestFminAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randn(*self.shape).astype(self.dtype) + self.np_y = np.random.randn(*self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + y = paddle.to_tensor(self.np_y) + paddle_dygraph_out = [] + + # Position args + out1 = paddle.fmin(x, y) + paddle_dygraph_out.append(out1) + + # Paddle keyword args + out2 = paddle.fmin(x=x, y=y) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.fmin(input=x, other=y) + paddle_dygraph_out.append(out3) + + # Test out parameter + out4 = paddle.empty([]) + paddle.fmin(x, y, out=out4) + paddle_dygraph_out.append(out4) + + # Numpy reference output + ref_out = np.fmin(self.np_x, self.np_y) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + y = paddle.static.data(name="y", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.fmin(x, y) + # Paddle keyword args + out2 = paddle.fmin(x=x, y=y) + # Torch keyword args + out3 = paddle.fmin(input=x, other=y) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "y": self.np_y}, + fetch_list=[out1, out2, out3], + ) + ref_out = np.fmin(self.np_x, self.np_y) + for out in fetches: + np.testing.assert_allclose(out, ref_out) + + +# Edit by AI Agent +# Test bincount compatibility +class TestBincountAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [10] + self.dtype = 'int64' + self.init_data() + + def init_data(self): + self.np_x = np.random.randint(0, 8, self.shape).astype(self.dtype) + self.np_weights = np.random.random(self.shape).astype('float32') + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + weights = paddle.to_tensor(self.np_weights) + paddle_dygraph_out = [] + + # Position args + out1 = paddle.bincount(x) + paddle_dygraph_out.append(out1) + + # Position args with weights + out2 = paddle.bincount(x, weights) + paddle_dygraph_out.append(out2) + + # Position args with weights and minlength + out3 = paddle.bincount(x, weights, 6) + paddle_dygraph_out.append(out3) + + # Paddle keyword args + out4 = paddle.bincount(x=x) + paddle_dygraph_out.append(out4) + + out5 = paddle.bincount(x=x, weights=weights) + paddle_dygraph_out.append(out5) + + out6 = paddle.bincount(x=x, weights=weights, minlength=6) + paddle_dygraph_out.append(out6) + + # Torch keyword args + out7 = paddle.bincount(input=x) + paddle_dygraph_out.append(out7) + + out8 = paddle.bincount(input=x, weights=weights) + paddle_dygraph_out.append(out8) + + out9 = paddle.bincount(input=x, weights=weights, minlength=6) + paddle_dygraph_out.append(out9) + + # Numpy reference outputs + ref_out1 = np.bincount(self.np_x) + ref_out2 = np.bincount(self.np_x, weights=self.np_weights) + ref_out3 = np.bincount(self.np_x, weights=self.np_weights, minlength=6) + + # Verify each output with corresponding reference + np.testing.assert_allclose(ref_out1, out1.numpy()) + np.testing.assert_allclose(ref_out2, out2.numpy()) + np.testing.assert_allclose(ref_out3, out3.numpy()) + np.testing.assert_allclose(ref_out1, out4.numpy()) + np.testing.assert_allclose(ref_out2, out5.numpy()) + np.testing.assert_allclose(ref_out3, out6.numpy()) + np.testing.assert_allclose(ref_out1, out7.numpy()) + np.testing.assert_allclose(ref_out2, out8.numpy()) + np.testing.assert_allclose(ref_out3, out9.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + weights = paddle.static.data( + name="weights", shape=self.shape, dtype='float32' + ) + + # Position args + out1 = paddle.bincount(x) + out2 = paddle.bincount(x, weights) + out3 = paddle.bincount(x, weights, 6) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "weights": self.np_weights}, + fetch_list=[out1, out2, out3], + ) + # Numpy reference outputs + ref_out1 = np.bincount(self.np_x) + ref_out2 = np.bincount(self.np_x, weights=self.np_weights) + ref_out3 = np.bincount( + self.np_x, weights=self.np_weights, minlength=6 + ) + np.testing.assert_allclose(ref_out1, fetches[0]) + np.testing.assert_allclose(ref_out2, fetches[1]) + np.testing.assert_allclose(ref_out3, fetches[2]) + + +# Edit by AI Agent +# Test diag compatibility +class TestDiagAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [3, 3] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randn(*self.shape).astype(self.dtype) + self.np_v = np.random.randn(3).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + v = paddle.to_tensor(self.np_v) + paddle_dygraph_out = [] + + # 1D tensor input (construct diagonal matrix) + out1 = paddle.diag(v) + paddle_dygraph_out.append(out1) + + # 2D tensor input (extract diagonal) + out2 = paddle.diag(x) + paddle_dygraph_out.append(out2) + + # 2D tensor with offset + out3 = paddle.diag(x, 1) + paddle_dygraph_out.append(out3) + + # Paddle keyword args + out4 = paddle.diag(x=x) + paddle_dygraph_out.append(out4) + + out5 = paddle.diag(x=x, offset=1) + paddle_dygraph_out.append(out5) + + # Torch keyword args + out6 = paddle.diag(input=x) + paddle_dygraph_out.append(out6) + + out7 = paddle.diag(input=x, diagonal=1) + paddle_dygraph_out.append(out7) + + # Test out parameter + out8 = paddle.empty([]) + paddle.diag(v, out=out8) + paddle_dygraph_out.append(out8) + + # Verify outputs + np.testing.assert_allclose(np.diag(self.np_v), out1.numpy()) + np.testing.assert_allclose(np.diag(self.np_x), out2.numpy()) + np.testing.assert_allclose(np.diag(self.np_x, 1), out3.numpy()) + np.testing.assert_allclose(np.diag(self.np_v), out8.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + v = paddle.static.data(name="v", shape=[3], dtype=self.dtype) + + # 1D tensor input + out1 = paddle.diag(v) + # 2D tensor input + out2 = paddle.diag(x) + out3 = paddle.diag(x, 1) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "v": self.np_v}, + fetch_list=[out1, out2, out3], + ) + np.testing.assert_allclose(np.diag(self.np_v), fetches[0]) + np.testing.assert_allclose(np.diag(self.np_x), fetches[1]) + np.testing.assert_allclose(np.diag(self.np_x, 1), fetches[2]) + + +# Test heaviside compatibility +class TestHeavisideAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randn(*self.shape).astype(self.dtype) + self.np_y = np.random.randn(*self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + y = paddle.to_tensor(self.np_y) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.heaviside(x, y) + paddle_dygraph_out.append(out1) + + # Paddle keyword args (kwargs) + out2 = paddle.heaviside(x=x, y=y) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.heaviside(input=x, values=y) + paddle_dygraph_out.append(out3) + + # Tensor method - args + out4 = paddle.empty([]) + out5 = x.heaviside(y, out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - kwargs + out6 = x.heaviside(y=y) + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.heaviside(x, y, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.heaviside(self.np_x, self.np_y) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + y = paddle.static.data(name="y", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.heaviside(x, y) + # Paddle keyword args + out2 = paddle.heaviside(x=x, y=y) + # Torch keyword args + out3 = paddle.heaviside(input=x, values=y) + # Tensor method + out4 = x.heaviside(y) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "y": self.np_y}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.heaviside(self.np_x, self.np_y) + for out in fetches: + np.testing.assert_allclose(out, ref_out) + + +class TestAsinhAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_input = np.random.randint(0, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_input) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.asinh(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword args (kwargs) + out2 = paddle.asinh(x=x) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.asinh(input=x) + paddle_dygraph_out.append(out3) + + # Tensor method args + out4 = paddle.empty([]) + out5 = x.asinh(out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method kwargs + out6 = x.asinh() + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.asinh(x, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.arcsinh(self.np_input) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy(), rtol=1e-6) + paddle.enable_static() + + def test_static_Compatibility(self): + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.asinh(x) + # Paddle keyword args + out2 = paddle.asinh(x=x) + # Torch keyword args + out3 = paddle.asinh(input=x) + # Tensor method + out4 = x.asinh() + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_input}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.arcsinh(self.np_input) + for out in fetches: + np.testing.assert_allclose(out, ref_out) + + +class TestReciprocalAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_input = np.random.randint(1, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_input) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.reciprocal(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword args (kwargs) + out2 = paddle.reciprocal(x=x) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.reciprocal(input=x) + paddle_dygraph_out.append(out3) + + # Tensor method kwargs + out4 = x.reciprocal() + paddle_dygraph_out.append(out4) + + # Test out parameter + out5 = paddle.empty([]) + paddle.reciprocal(x, out=out5) + paddle_dygraph_out.append(out5) + + # Numpy reference output + ref_out = 1.0 / self.np_input + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.reciprocal(x) + # Paddle keyword args + out2 = paddle.reciprocal(x=x) + # Torch keyword args + out3 = paddle.reciprocal(input=x) + # Tensor method + out4 = x.reciprocal() + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_input}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = 1.0 / self.np_input + for out in fetches: + np.testing.assert_allclose(out, ref_out) + + +class TestSquareAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_input = np.random.randint(0, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_input) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.square(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword args (kwargs) + out2 = paddle.square(x=x) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.square(input=x) + paddle_dygraph_out.append(out3) + + # Tensor method args + out4 = paddle.empty([]) + out5 = x.square(out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method kwargs + out6 = x.square() + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.square(x, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.square(self.np_input) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.square(x) + # Paddle keyword args + out2 = paddle.square(x=x) + # Torch keyword args + out3 = paddle.square(input=x) + # Tensor method + out4 = x.square() + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_input}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.square(self.np_input) + for out in fetches: + np.testing.assert_allclose(out, ref_out) + + +class TestTanAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_input = np.random.randint(0, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_input) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.tan(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword args (kwargs) + out2 = paddle.tan(x=x) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.tan(input=x) + paddle_dygraph_out.append(out3) + + # Tensor method args + out4 = paddle.empty([]) + out5 = x.tan(out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method kwargs + out6 = x.tan() + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.tan(x, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.tan(self.np_input) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_allclose(ref_out, out.numpy(), rtol=1e-6) + paddle.enable_static() + + def test_static_Compatibility(self): + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.tan(x) + # Paddle keyword args + out2 = paddle.tan(x=x) + # Torch keyword args + out3 = paddle.tan(input=x) + # Tensor method + out4 = x.tan() + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_input}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.tan(self.np_input) + for out in fetches: + np.testing.assert_allclose(out, ref_out, rtol=1e-6) + + +# Edit by AI Agent +# Test bitwise_and compatibility +class TestBitwiseAndAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'int32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randint(0, 8, self.shape).astype(self.dtype) + self.np_y = np.random.randint(0, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + y = paddle.to_tensor(self.np_y) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.bitwise_and(x, y) + paddle_dygraph_out.append(out1) + + # Paddle keyword args + out2 = paddle.bitwise_and(x=x, y=y) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.bitwise_and(input=x, other=y) + paddle_dygraph_out.append(out3) + + # Tensor method - args + out4 = paddle.empty([]) + out5 = x.bitwise_and(y, out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - kwargs + out6 = x.bitwise_and(y=y) + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.bitwise_and(x, y, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.bitwise_and(self.np_x, self.np_y) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_array_equal(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + y = paddle.static.data(name="y", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.bitwise_and(x, y) + # Paddle keyword args + out2 = paddle.bitwise_and(x=x, y=y) + # Torch keyword args + out3 = paddle.bitwise_and(input=x, other=y) + # Tensor method + out4 = x.bitwise_and(y) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "y": self.np_y}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.bitwise_and(self.np_x, self.np_y) + for out in fetches: + np.testing.assert_array_equal(out, ref_out) + + +# Test bitwise_not compatibility +class TestBitwiseNotAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'int32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randint(0, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.bitwise_not(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword args + out2 = paddle.bitwise_not(x=x) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.bitwise_not(input=x) + paddle_dygraph_out.append(out3) + + # Tensor method - args + out4 = paddle.empty([]) + out5 = x.bitwise_not(out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - kwargs + out6 = x.bitwise_not() + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.bitwise_not(x, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.bitwise_not(self.np_x) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_array_equal(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.bitwise_not(x) + # Paddle keyword args + out2 = paddle.bitwise_not(x=x) + # Torch keyword args + out3 = paddle.bitwise_not(input=x) + # Tensor method + out4 = x.bitwise_not() + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.bitwise_not(self.np_x) + for out in fetches: + np.testing.assert_array_equal(out, ref_out) + + +# Test bitwise_xor compatibility +class TestBitwiseXorAPI_Compatibility(unittest.TestCase): + def setUp(self): + np.random.seed(123) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'int32' + self.init_data() + + def init_data(self): + self.np_x = np.random.randint(0, 8, self.shape).astype(self.dtype) + self.np_y = np.random.randint(0, 8, self.shape).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + y = paddle.to_tensor(self.np_y) + paddle_dygraph_out = [] + + # Position args (args) + out1 = paddle.bitwise_xor(x, y) + paddle_dygraph_out.append(out1) + + # Paddle keyword args + out2 = paddle.bitwise_xor(x=x, y=y) + paddle_dygraph_out.append(out2) + + # Torch keyword args + out3 = paddle.bitwise_xor(input=x, other=y) + paddle_dygraph_out.append(out3) + + # Tensor method - args + out4 = paddle.empty([]) + out5 = x.bitwise_xor(y, out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - kwargs + out6 = x.bitwise_xor(y=y) + paddle_dygraph_out.append(out6) + + # Test out parameter + out7 = paddle.empty([]) + paddle.bitwise_xor(x, y, out=out7) + paddle_dygraph_out.append(out7) + + # Numpy reference output + ref_out = np.bitwise_xor(self.np_x, self.np_y) + + # Verify all outputs + for out in paddle_dygraph_out: + np.testing.assert_array_equal(ref_out, out.numpy()) + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.base.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + y = paddle.static.data(name="y", shape=self.shape, dtype=self.dtype) + + # Position args + out1 = paddle.bitwise_xor(x, y) + # Paddle keyword args + out2 = paddle.bitwise_xor(x=x, y=y) + # Torch keyword args + out3 = paddle.bitwise_xor(input=x, other=y) + # Tensor method + out4 = x.bitwise_xor(y) + + exe = paddle.base.Executor(paddle.CPUPlace()) + fetches = exe.run( + main, + feed={"x": self.np_x, "y": self.np_y}, + fetch_list=[out1, out2, out3, out4], + ) + ref_out = np.bitwise_xor(self.np_x, self.np_y) + for out in fetches: + np.testing.assert_array_equal(out, ref_out) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/legacy_test/test_bincount_op.py b/test/legacy_test/test_bincount_op.py index 1b55f47328304e..168f20bb239de4 100644 --- a/test/legacy_test/test_bincount_op.py +++ b/test/legacy_test/test_bincount_op.py @@ -138,6 +138,14 @@ def net_func(): with self.assertRaises(TypeError): self.run_network(net_func) + def test_input_type_errors_static(self): + """Test input tensor should only contain non-negative ints in static graph.""" + paddle.enable_static() + with self.assertRaises(TypeError): + x = paddle.static.data(name='x', dtype='float32', shape=[5]) + paddle.bincount(x) + paddle.disable_static() + def test_weights_shape_error(self): """Test weights tensor should have the same shape as input tensor.""" diff --git a/test/legacy_test/test_bitwise_op.py b/test/legacy_test/test_bitwise_op.py index 475ea94bcca3a9..385e11d84356cc 100644 --- a/test/legacy_test/test_bitwise_op.py +++ b/test/legacy_test/test_bitwise_op.py @@ -27,7 +27,7 @@ class TestBitwiseAnd(OpTest): def setUp(self): self.op_type = "bitwise_and" - self.python_api = paddle.tensor.logic.bitwise_and + self.python_api = paddle.tensor.bitwise_and self.init_dtype() self.init_shape() self.init_bound() @@ -120,7 +120,7 @@ def init_shape(self): class TestBitwiseAndBool(TestBitwiseAnd): def setUp(self): self.op_type = "bitwise_and" - self.python_api = paddle.tensor.logic.bitwise_and + self.python_api = paddle.tensor.bitwise_and self.init_shape() @@ -141,8 +141,8 @@ class TestElementwiseBitwiseAndOp_Stride(OpTest): def setUp(self): self.op_type = "bitwise_and" - self.python_api = paddle.tensor.logic.bitwise_and - self.public_python_api = paddle.tensor.logic.bitwise_and + self.python_api = paddle.tensor.bitwise_and + self.public_python_api = paddle.tensor.bitwise_and self.transpose_api = paddle.transpose self.as_stride_api = paddle.as_strided self.init_dtype() @@ -293,7 +293,7 @@ def init_data(self): class TestBitwiseOr(OpTest): def setUp(self): self.op_type = "bitwise_or" - self.python_api = paddle.tensor.logic.bitwise_or + self.python_api = paddle.tensor.bitwise_or self.init_dtype() self.init_shape() self.init_bound() @@ -386,7 +386,7 @@ def init_shape(self): class TestBitwiseOrBool(TestBitwiseOr): def setUp(self): self.op_type = "bitwise_or" - self.python_api = paddle.tensor.logic.bitwise_or + self.python_api = paddle.tensor.bitwise_or self.init_shape() @@ -407,8 +407,8 @@ class TestElementwiseBitwiseOrOp_Stride(OpTest): def setUp(self): self.op_type = "bitwise_or" - self.python_api = paddle.tensor.logic.bitwise_or - self.public_python_api = paddle.tensor.logic.bitwise_or + self.python_api = paddle.tensor.bitwise_or + self.public_python_api = paddle.tensor.bitwise_or self.transpose_api = paddle.transpose self.as_stride_api = paddle.as_strided self.init_dtype() @@ -559,7 +559,7 @@ def init_data(self): class TestBitwiseXor(OpTest): def setUp(self): self.op_type = "bitwise_xor" - self.python_api = paddle.tensor.logic.bitwise_xor + self.python_api = paddle.tensor.bitwise_xor self.init_dtype() self.init_shape() @@ -653,7 +653,7 @@ def init_shape(self): class TestBitwiseXorBool(TestBitwiseXor): def setUp(self): self.op_type = "bitwise_xor" - self.python_api = paddle.tensor.logic.bitwise_xor + self.python_api = paddle.tensor.bitwise_xor self.init_shape() @@ -674,8 +674,8 @@ class TestElementwiseBitwiseXorOp_Stride(OpTest): def setUp(self): self.op_type = "bitwise_xor" - self.python_api = paddle.tensor.logic.bitwise_xor - self.public_python_api = paddle.tensor.logic.bitwise_xor + self.python_api = paddle.tensor.bitwise_xor + self.public_python_api = paddle.tensor.bitwise_xor self.transpose_api = paddle.transpose self.as_stride_api = paddle.as_strided self.init_dtype() @@ -826,7 +826,7 @@ def init_data(self): class TestBitwiseNot(OpTest): def setUp(self): self.op_type = "bitwise_not" - self.python_api = paddle.tensor.logic.bitwise_not + self.python_api = paddle.tensor.bitwise_not self.init_dtype() self.init_shape() @@ -905,7 +905,7 @@ def init_shape(self): class TestBitwiseNotBool(TestBitwiseNot): def setUp(self): self.op_type = "bitwise_not" - self.python_api = paddle.tensor.logic.bitwise_not + self.python_api = paddle.tensor.bitwise_not self.init_shape() x = np.random.choice([True, False], self.x_shape) diff --git a/test/legacy_test/test_zero_dim_binary_api.py b/test/legacy_test/test_zero_dim_binary_api.py index f1bea84845d137..e4e82a07d7a677 100644 --- a/test/legacy_test/test_zero_dim_binary_api.py +++ b/test/legacy_test/test_zero_dim_binary_api.py @@ -167,7 +167,7 @@ def test_dygraph_binary(self): # 1) x is 0D, y is 0D x_np = np.random.randint(-10, 10, []) y_np = np.random.randint(-10, 10, []) - out_np = eval(f'np.{api.__name__}(x_np, y_np)') + out_np = eval(f"np.{api.__name__.lstrip('_')}(x_np, y_np)") x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) @@ -179,7 +179,7 @@ def test_dygraph_binary(self): # 2) x is ND, y is 0D x_np = np.random.randint(-10, 10, [3, 5]) y_np = np.random.randint(-10, 10, []) - out_np = eval(f'np.{api.__name__}(x_np, y_np)') + out_np = eval(f"np.{api.__name__.lstrip('_')}(x_np, y_np)") x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) @@ -191,7 +191,7 @@ def test_dygraph_binary(self): # 3) x is 0D , y is ND x_np = np.random.randint(-10, 10, []) y_np = np.random.randint(-10, 10, [3, 5]) - out_np = eval(f'np.{api.__name__}(x_np, y_np)') + out_np = eval(f"np.{api.__name__.lstrip('_')}(x_np, y_np)") x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) diff --git a/test/xpu/test_zero_dim_tensor_xpu.py b/test/xpu/test_zero_dim_tensor_xpu.py index 01cf6f78cb19b7..26008d292b9c42 100644 --- a/test/xpu/test_zero_dim_tensor_xpu.py +++ b/test/xpu/test_zero_dim_tensor_xpu.py @@ -345,7 +345,7 @@ def test_dygraph_binary(self): # 1) x is 0D, y is 0D x_np = np.random.randint(-10, 10, []) y_np = np.random.randint(-10, 10, []) - out_np = eval(f'np.{api.__name__}(x_np, y_np)') + out_np = eval(f"np.{api.__name__.lstrip('_')}(x_np, y_np)") x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) @@ -357,7 +357,7 @@ def test_dygraph_binary(self): # 2) x is ND, y is 0D x_np = np.random.randint(-10, 10, [3, 5]) y_np = np.random.randint(-10, 10, []) - out_np = eval(f'np.{api.__name__}(x_np, y_np)') + out_np = eval(f"np.{api.__name__.lstrip('_')}(x_np, y_np)") x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) @@ -369,7 +369,7 @@ def test_dygraph_binary(self): # 3) x is 0D , y is ND x_np = np.random.randint(-10, 10, []) y_np = np.random.randint(-10, 10, [3, 5]) - out_np = eval(f'np.{api.__name__}(x_np, y_np)') + out_np = eval(f"np.{api.__name__.lstrip('_')}(x_np, y_np)") x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np)