From f4ad0d76d342741518cc427b0a81085b66df72e6 Mon Sep 17 00:00:00 2001
From: co63oc <4617245+co63oc@users.noreply.github.com>
Date: Wed, 7 Jan 2026 21:14:24 +0800
Subject: [PATCH 1/2] add phi::StringToDataLayout

---
 paddle/common/layout.h                          | 4 +++-
 paddle/phi/kernels/cpu/batch_norm_kernel.cc     | 2 +-
 paddle/phi/kernels/xpu/affine_channel_kernel.cc | 2 +-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/paddle/common/layout.h b/paddle/common/layout.h
index 4d13690be97672..1fd4be0fa09325 100644
--- a/paddle/common/layout.h
+++ b/paddle/common/layout.h
@@ -144,7 +144,9 @@ using DataLayout = common::DataLayout;
 
 namespace phi {
 using DataLayout = common::DataLayout;
-}
+using common::DataLayoutToString;
+using common::StringToDataLayout;
+}  // namespace phi
 
 namespace paddle {
 // In order to be compatible with the original custom operator Tensor interface
diff --git a/paddle/phi/kernels/cpu/batch_norm_kernel.cc b/paddle/phi/kernels/cpu/batch_norm_kernel.cc
index b3bd79b791c6a6..667d6ddf5da057 100644
--- a/paddle/phi/kernels/cpu/batch_norm_kernel.cc
+++ b/paddle/phi/kernels/cpu/batch_norm_kernel.cc
@@ -68,7 +68,7 @@ void BatchNormKernel(const Context& dev_ctx,
 
   bool global_stats = test_mode || use_global_stats;
 
-  auto data_layout = common::StringToDataLayout(data_layout_str);
+  auto data_layout = StringToDataLayout(data_layout_str);
 
   const auto& x_dims = x.dims();
   PADDLE_ENFORCE_GE(
diff --git a/paddle/phi/kernels/xpu/affine_channel_kernel.cc b/paddle/phi/kernels/xpu/affine_channel_kernel.cc
index 4ffd87ed0b7c1b..0a92bd6fdb76b4 100644
--- a/paddle/phi/kernels/xpu/affine_channel_kernel.cc
+++ b/paddle/phi/kernels/xpu/affine_channel_kernel.cc
@@ -36,7 +36,7 @@ void AffineChannelXPUKernel(const Context& dev_ctx,
   auto* y = out;
   dev_ctx.template Alloc<T>(y);
 
-  const DataLayout layout = common::StringToDataLayout(data_layout);
+  const DataLayout layout = StringToDataLayout(data_layout);
   auto dims = x->dims();
   int64_t N = dims[0];
 

From 9532dcbbdd88b6d897b51f1102ddf34ae8e4ac36 Mon Sep 17 00:00:00 2001
From: co63oc <4617245+co63oc@users.noreply.github.com>
Date: Thu, 8 Jan 2026 08:18:18 +0800
Subject: [PATCH 2/2] fix

---
 paddle/phi/kernels/cpu/affine_channel_grad_kernel.cc        | 2 +-
 paddle/phi/kernels/cpu/affine_channel_kernel.cc             | 2 +-
 paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc            | 4 ++--
 paddle/phi/kernels/cpu/group_norm_grad_kernel.cc            | 2 +-
 paddle/phi/kernels/cpu/group_norm_kernel.cc                 | 2 +-
 paddle/phi/kernels/cpu/interpolate_grad_kernel.cc           | 6 +++---
 paddle/phi/kernels/cpu/interpolate_kernel.cc                | 6 +++---
 paddle/phi/kernels/cpu/temporal_shift_grad_kernel.cc        | 2 +-
 paddle/phi/kernels/cpu/temporal_shift_kernel.cc             | 2 +-
 paddle/phi/kernels/funcs/data_layout_transform.h            | 2 +-
 paddle/phi/kernels/funcs/sync_batch_norm_utils.h            | 2 +-
 paddle/phi/kernels/fusion/onednn/fused_conv_kernel.cc       | 2 +-
 paddle/phi/kernels/fusion/xpu/bn_act_xpu_kernel.cc          | 2 +-
 paddle/phi/kernels/gpu/affine_channel_grad_kernel.cu        | 2 +-
 paddle/phi/kernels/gpu/affine_channel_kernel.cu             | 2 +-
 paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu            | 4 ++--
 paddle/phi/kernels/gpu/batch_norm_kernel.cu                 | 2 +-
 paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu        | 2 +-
 paddle/phi/kernels/gpu/conv_transpose_kernel.cu             | 2 +-
 .../phi/kernels/gpu/depthwise_conv2d_bias_grad_kernel.cu    | 2 +-
 paddle/phi/kernels/gpu/depthwise_conv2d_bias_kernel.cu      | 2 +-
 .../phi/kernels/gpu/depthwise_conv3d_bias_grad_kernel.cu    | 2 +-
 paddle/phi/kernels/gpu/depthwise_conv3d_bias_kernel.cu      | 2 +-
 paddle/phi/kernels/gpu/depthwise_conv_grad_kernel.cu        | 2 +-
paddle/phi/kernels/gpu/depthwise_conv_kernel.cu | 2 +- paddle/phi/kernels/gpu/group_norm_grad_kernel.cu | 2 +- paddle/phi/kernels/gpu/group_norm_kernel.cu | 4 ++-- paddle/phi/kernels/gpu/interpolate_grad_kernel.cu | 8 ++++---- paddle/phi/kernels/gpu/interpolate_kernel.cu | 8 ++++---- paddle/phi/kernels/gpu/sync_batch_norm_kernel.cu | 2 +- paddle/phi/kernels/gpu/temporal_shift_grad_kernel.cu | 2 +- paddle/phi/kernels/gpu/temporal_shift_kernel.cu | 2 +- paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h | 2 +- paddle/phi/kernels/impl/conv_transpose_kernel_impl.h | 2 +- paddle/phi/kernels/impl/lrn_kernel_impl.h | 4 ++-- paddle/phi/kernels/onednn/conv_grad_kernel.cc | 2 +- paddle/phi/kernels/onednn/conv_kernel.cc | 2 +- paddle/phi/kernels/onednn/conv_transpose_kernel.cc | 2 +- paddle/phi/kernels/onednn/interpolate_kernel.cc | 2 +- paddle/phi/kernels/onednn/pad3d_kernel.cc | 2 +- paddle/phi/kernels/onednn/pool_grad_kernel.cc | 2 +- paddle/phi/kernels/onednn/pool_kernel.cc | 2 +- paddle/phi/kernels/xpu/affine_channel_grad_kernel.cc | 2 +- paddle/phi/kernels/xpu/batch_norm_grad_kernel.cc | 2 +- paddle/phi/kernels/xpu/batch_norm_kernel.cc | 2 +- paddle/phi/kernels/xpu/grid_sample_kernel.cc | 2 +- paddle/phi/kernels/xpu/group_norm_grad_kernel.cc | 2 +- paddle/phi/kernels/xpu/group_norm_kernel.cc | 2 +- paddle/phi/kernels/xpu/interpolate_grad_kernel.cc | 2 +- paddle/phi/kernels/xpu/interpolate_kernel.cc | 2 +- paddle/phi/kernels/xpu/temporal_shift_grad_kernel.cc | 2 +- paddle/phi/kernels/xpu/temporal_shift_kernel.cc | 2 +- paddle/phi/kernels/xpu/unfold_grad_kernel.cc | 2 +- paddle/phi/kernels/xpu/unfold_kernel.cc | 2 +- 54 files changed, 68 insertions(+), 68 deletions(-) diff --git a/paddle/phi/kernels/cpu/affine_channel_grad_kernel.cc b/paddle/phi/kernels/cpu/affine_channel_grad_kernel.cc index fecdca498cc745..b0a2854f105d80 100644 --- a/paddle/phi/kernels/cpu/affine_channel_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/affine_channel_grad_kernel.cc @@ -50,7 +50,7 @@ void AffineChannelGradKernel(const Context& dev_ctx, auto* dscale = scale_grad; auto* dbias = bias_grad; - const DataLayout layout = common::StringToDataLayout(data_layout); + const DataLayout layout = StringToDataLayout(data_layout); auto dims = x->dims(); int N = static_cast(dims[0]); diff --git a/paddle/phi/kernels/cpu/affine_channel_kernel.cc b/paddle/phi/kernels/cpu/affine_channel_kernel.cc index 32ad061fe56231..318173b8fdfe32 100644 --- a/paddle/phi/kernels/cpu/affine_channel_kernel.cc +++ b/paddle/phi/kernels/cpu/affine_channel_kernel.cc @@ -46,7 +46,7 @@ void AffineChannelKernel(const Context& dev_ctx, auto* y = out; dev_ctx.template Alloc(y); - const DataLayout layout = common::StringToDataLayout(data_layout); + const DataLayout layout = StringToDataLayout(data_layout); auto dims = x->dims(); int N = static_cast(dims[0]); diff --git a/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc b/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc index 1c98c3cd73eae6..212187a8c6fedf 100644 --- a/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc @@ -59,7 +59,7 @@ void BatchNormGradFunctor(const Context& dev_ctx, DenseTensor* bias_grad) { const auto* d_y = &y_grad; - DataLayout data_layout = common::StringToDataLayout(data_layout_str); + DataLayout data_layout = StringToDataLayout(data_layout_str); auto* d_x = x_grad; auto* d_scale = scale_grad; @@ -399,7 +399,7 @@ void BatchNormDoubleGradKernel( "you want to use global status in pre_train model, " "please set `use_global_stats = 
True`")); - const auto data_layout = common::StringToDataLayout(data_layout_str); + const auto data_layout = StringToDataLayout(data_layout_str); const auto* ddX = x_grad_grad.get_ptr(); const auto* ddScale = scale_grad_grad.get_ptr(); diff --git a/paddle/phi/kernels/cpu/group_norm_grad_kernel.cc b/paddle/phi/kernels/cpu/group_norm_grad_kernel.cc index eddc4157a29240..0aec82720c1882 100644 --- a/paddle/phi/kernels/cpu/group_norm_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/group_norm_grad_kernel.cc @@ -67,7 +67,7 @@ void GroupNormGradKernel(const Context& dev_ctx, } return; } - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const auto scale_ptr = scale.get_ptr(); const auto bias_ptr = bias.get_ptr(); const auto& x_dims = y.dims(); diff --git a/paddle/phi/kernels/cpu/group_norm_kernel.cc b/paddle/phi/kernels/cpu/group_norm_kernel.cc index a7f822b1dbfb55..d50997fc7cbbb9 100644 --- a/paddle/phi/kernels/cpu/group_norm_kernel.cc +++ b/paddle/phi/kernels/cpu/group_norm_kernel.cc @@ -54,7 +54,7 @@ void GroupNormKernel(const Context& dev_ctx, } return; } - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const auto scale_ptr = scale.get_ptr(); const auto bias_ptr = bias.get_ptr(); diff --git a/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc b/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc index a11b3e983d0b20..dd6badb0234074 100644 --- a/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc @@ -406,7 +406,7 @@ static void Interpolate1DCPUBwd( bool align_corners, int align_mode, DenseTensor* input_grad) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -507,7 +507,7 @@ static void Interpolate2DCPUBwd( bool align_corners, int align_mode, DenseTensor* input_grad) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -664,7 +664,7 @@ static void Interpolate3DCPUBwd( bool align_corners, int align_mode, DenseTensor* input_grad) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); diff --git a/paddle/phi/kernels/cpu/interpolate_kernel.cc b/paddle/phi/kernels/cpu/interpolate_kernel.cc index 58e72b882296a5..a30a64953020c0 100644 --- a/paddle/phi/kernels/cpu/interpolate_kernel.cc +++ b/paddle/phi/kernels/cpu/interpolate_kernel.cc @@ -557,7 +557,7 @@ static void Interpolate1DCPUFwd( bool align_corners, int align_mode, DenseTensor* output) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0; funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -650,7 +650,7 @@ static void Interpolate2DCPUFwd( bool align_corners, int 
align_mode, DenseTensor* output) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0; funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -813,7 +813,7 @@ static void Interpolate3DCPUFwd( bool align_corners, int align_mode, DenseTensor* output) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0; funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); diff --git a/paddle/phi/kernels/cpu/temporal_shift_grad_kernel.cc b/paddle/phi/kernels/cpu/temporal_shift_grad_kernel.cc index 8563955797a851..dda81e74d8a27f 100644 --- a/paddle/phi/kernels/cpu/temporal_shift_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/temporal_shift_grad_kernel.cc @@ -96,7 +96,7 @@ void TemporalShiftGradKernel(const Context& dev_ctx, auto* input_grad = x_grad; auto* output_grad = &out_grad; int t = seg_num; - const DataLayout data_layout = common::StringToDataLayout(data_format_str); + const DataLayout data_layout = StringToDataLayout(data_format_str); const int nt = static_cast(output_grad->dims()[0]); const int c = static_cast(data_layout == DataLayout::NCHW diff --git a/paddle/phi/kernels/cpu/temporal_shift_kernel.cc b/paddle/phi/kernels/cpu/temporal_shift_kernel.cc index 4681575f553b57..137165b4bf3aee 100644 --- a/paddle/phi/kernels/cpu/temporal_shift_kernel.cc +++ b/paddle/phi/kernels/cpu/temporal_shift_kernel.cc @@ -96,7 +96,7 @@ void TemporalShiftKernel(const Context& dev_ctx, auto* input = &x; auto* output = out; int t = seg_num; - const DataLayout data_layout = common::StringToDataLayout(data_format_str); + const DataLayout data_layout = StringToDataLayout(data_format_str); const int nt = static_cast(input->dims()[0]); const int c = static_cast( diff --git a/paddle/phi/kernels/funcs/data_layout_transform.h b/paddle/phi/kernels/funcs/data_layout_transform.h index b420e0833e2d0c..1c630a6ea9d656 100644 --- a/paddle/phi/kernels/funcs/data_layout_transform.h +++ b/paddle/phi/kernels/funcs/data_layout_transform.h @@ -44,7 +44,7 @@ inline OneDNNMemoryFormat ToOneDNNFormat(const DataLayout& layout) { default: PADDLE_THROW( errors::InvalidArgument("Fail to convert layout %s to oneDNN format.", - ::common::DataLayoutToString(layout))); + DataLayoutToString(layout))); } } diff --git a/paddle/phi/kernels/funcs/sync_batch_norm_utils.h b/paddle/phi/kernels/funcs/sync_batch_norm_utils.h index cdedde372900fd..84918aeddb6425 100644 --- a/paddle/phi/kernels/funcs/sync_batch_norm_utils.h +++ b/paddle/phi/kernels/funcs/sync_batch_norm_utils.h @@ -425,7 +425,7 @@ void SyncBatchNormGradFunctor( DenseTensor *bias_grad) { double epsilon = static_cast(epsilon_f); - const DataLayout layout = common::StringToDataLayout(data_layout_str); + const DataLayout layout = StringToDataLayout(data_layout_str); const auto *d_y = &y_grad; diff --git a/paddle/phi/kernels/fusion/onednn/fused_conv_kernel.cc b/paddle/phi/kernels/fusion/onednn/fused_conv_kernel.cc index f40818785d1a90..75162275790d8b 100644 --- a/paddle/phi/kernels/fusion/onednn/fused_conv_kernel.cc +++ b/paddle/phi/kernels/fusion/onednn/fused_conv_kernel.cc @@ -144,7 +144,7 @@ KernelKey ConvGetKernelTypeForVar(const GetKernelTypeForVarContext* ctx) { (tensor.layout() != DataLayout::ONEDNN)) { auto it = attrs.find("data_format"); const std::string data_format 
= PADDLE_GET_CONST(std::string, it->second); - auto dl = common::StringToDataLayout(data_format); + auto dl = StringToDataLayout(data_format); // Some models may have intentionally set "AnyLayout" for conv // op. Treat this as NCHW (default data_format value) if (dl != DataLayout::ANY) { diff --git a/paddle/phi/kernels/fusion/xpu/bn_act_xpu_kernel.cc b/paddle/phi/kernels/fusion/xpu/bn_act_xpu_kernel.cc index 8b4c36cc004eba..5c814310c5623b 100644 --- a/paddle/phi/kernels/fusion/xpu/bn_act_xpu_kernel.cc +++ b/paddle/phi/kernels/fusion/xpu/bn_act_xpu_kernel.cc @@ -33,7 +33,7 @@ void BNActXPUKernel(const Context& dev_ctx, int act_type, DenseTensor* y) { using XPUType = typename XPUTypeTrait::Type; - const auto data_layout = common::StringToDataLayout(data_layout_str); + const auto data_layout = StringToDataLayout(data_layout_str); PADDLE_ENFORCE_EQ(data_layout_str == "NCHW" || data_layout_str == "NHWC", true, common::errors::InvalidArgument( diff --git a/paddle/phi/kernels/gpu/affine_channel_grad_kernel.cu b/paddle/phi/kernels/gpu/affine_channel_grad_kernel.cu index 9a67783593aced..f8525200dd4082 100644 --- a/paddle/phi/kernels/gpu/affine_channel_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/affine_channel_grad_kernel.cu @@ -98,7 +98,7 @@ void AffineChannelGradCUDAKernel(const Context& dev_ctx, auto* dscale = scale_grad; auto* dbias = bias_grad; - const DataLayout layout = common::StringToDataLayout(data_layout); + const DataLayout layout = StringToDataLayout(data_layout); auto dims = dy->dims(); const int64_t num = dy->numel(); diff --git a/paddle/phi/kernels/gpu/affine_channel_kernel.cu b/paddle/phi/kernels/gpu/affine_channel_kernel.cu index 62046e295d2dc6..f06595e4237f35 100644 --- a/paddle/phi/kernels/gpu/affine_channel_kernel.cu +++ b/paddle/phi/kernels/gpu/affine_channel_kernel.cu @@ -56,7 +56,7 @@ void AffineChannelCUDAKernel(const Context& dev_ctx, auto* y = out; dev_ctx.template Alloc(y); - const DataLayout layout = common::StringToDataLayout(data_layout); + const DataLayout layout = StringToDataLayout(data_layout); auto dims = x->dims(); const int64_t num = x->numel(); diff --git a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu index a01bfbc5960d47..47525ef677972d 100644 --- a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu @@ -533,7 +533,7 @@ void BatchNormGradFunctor(const Context &dev_ctx, DenseTensor *bias_grad) { double epsilon = static_cast(epsilon_f); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const auto *d_y = &y_grad; @@ -1415,7 +1415,7 @@ void BatchNormDoubleGradKernel( "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const DenseTensor *running_mean = nullptr; const DenseTensor *running_variance = nullptr; diff --git a/paddle/phi/kernels/gpu/batch_norm_kernel.cu b/paddle/phi/kernels/gpu/batch_norm_kernel.cu index 357e63d518a4e1..f3e01e2ae250cd 100644 --- a/paddle/phi/kernels/gpu/batch_norm_kernel.cu +++ b/paddle/phi/kernels/gpu/batch_norm_kernel.cu @@ -570,7 +570,7 @@ void BatchNormKernel(const Context &dev_ctx, } double epsilon = epsilon_f; const bool trainable_stats = trainable_statistics; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const 
DataLayout data_layout = StringToDataLayout(data_layout_str); bool test_mode = is_test && (!trainable_stats); // Get the size for each dimension. diff --git a/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu b/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu index ac8b6bfbd461fd..be96b8e11857f5 100644 --- a/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu @@ -72,7 +72,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& dev_ctx, const std::string& data_format, DenseTensor* dx, DenseTensor* dfilter) { - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); DenseTensor filter_ = filter; if (!dx && !dfilter) { diff --git a/paddle/phi/kernels/gpu/conv_transpose_kernel.cu b/paddle/phi/kernels/gpu/conv_transpose_kernel.cu index 6bfc1a6d34e915..f226f43bb691e0 100644 --- a/paddle/phi/kernels/gpu/conv_transpose_kernel.cu +++ b/paddle/phi/kernels/gpu/conv_transpose_kernel.cu @@ -42,7 +42,7 @@ void DepthwiseConv2dTransposeKernel(const Context& dev_ctx, dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out); return; } - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); DenseTensor filter_ = filter; dev_ctx.template Alloc(out); diff --git a/paddle/phi/kernels/gpu/depthwise_conv2d_bias_grad_kernel.cu b/paddle/phi/kernels/gpu/depthwise_conv2d_bias_grad_kernel.cu index d6e8246eb77d69..580f5551010df9 100644 --- a/paddle/phi/kernels/gpu/depthwise_conv2d_bias_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/depthwise_conv2d_bias_grad_kernel.cu @@ -485,7 +485,7 @@ void DepthwiseConv2dBiasGradKernel(const Context& dev_ctx, auto filter_dims = filter.dims(); DDim in_data_dims; - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); if (data_layout != DataLayout::NHWC) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); } else { diff --git a/paddle/phi/kernels/gpu/depthwise_conv2d_bias_kernel.cu b/paddle/phi/kernels/gpu/depthwise_conv2d_bias_kernel.cu index 4e86c35843e9ce..82e6772d0443e5 100644 --- a/paddle/phi/kernels/gpu/depthwise_conv2d_bias_kernel.cu +++ b/paddle/phi/kernels/gpu/depthwise_conv2d_bias_kernel.cu @@ -384,7 +384,7 @@ void DepthwiseConv2dBiasKernel(const Context& dev_ctx, auto filter_dims = filter.dims(); DDim in_data_dims; - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); if (data_layout != DataLayout::NHWC) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); } else { diff --git a/paddle/phi/kernels/gpu/depthwise_conv3d_bias_grad_kernel.cu b/paddle/phi/kernels/gpu/depthwise_conv3d_bias_grad_kernel.cu index 5cba151c29e3fc..e3c5e8cd966a3c 100644 --- a/paddle/phi/kernels/gpu/depthwise_conv3d_bias_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/depthwise_conv3d_bias_grad_kernel.cu @@ -664,7 +664,7 @@ void DepthwiseConv3dBiasGradKernel(const Context& dev_ctx, auto filter_dims = filter.dims(); DDim in_data_dims; - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); if (data_layout != DataLayout::NDHWC) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); } else { diff --git a/paddle/phi/kernels/gpu/depthwise_conv3d_bias_kernel.cu b/paddle/phi/kernels/gpu/depthwise_conv3d_bias_kernel.cu 
index 0d76b9822df958..cc914354e271de 100644 --- a/paddle/phi/kernels/gpu/depthwise_conv3d_bias_kernel.cu +++ b/paddle/phi/kernels/gpu/depthwise_conv3d_bias_kernel.cu @@ -329,7 +329,7 @@ void DepthwiseConv3dBiasKernel(const Context& dev_ctx, auto filter_dims = filter.dims(); DDim in_data_dims; - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); if (data_layout != DataLayout::NDHWC) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); } else { diff --git a/paddle/phi/kernels/gpu/depthwise_conv_grad_kernel.cu b/paddle/phi/kernels/gpu/depthwise_conv_grad_kernel.cu index 10cd523a2f8483..f4df79d4f6df9a 100644 --- a/paddle/phi/kernels/gpu/depthwise_conv_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/depthwise_conv_grad_kernel.cu @@ -99,7 +99,7 @@ void DepthwiseConvGradKernel(const Context& dev_ctx, auto filter_dims = filter.dims(); DDim in_data_dims; - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); if (data_layout != DataLayout::NHWC) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); } else { diff --git a/paddle/phi/kernels/gpu/depthwise_conv_kernel.cu b/paddle/phi/kernels/gpu/depthwise_conv_kernel.cu index e624793a4d1cd4..bfa70ed604cebb 100644 --- a/paddle/phi/kernels/gpu/depthwise_conv_kernel.cu +++ b/paddle/phi/kernels/gpu/depthwise_conv_kernel.cu @@ -100,7 +100,7 @@ void DepthwiseConvKernel(const Context& dev_ctx, auto filter_dims = filter.dims(); DDim in_data_dims; - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); if (data_layout != DataLayout::NHWC) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); } else { diff --git a/paddle/phi/kernels/gpu/group_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/group_norm_grad_kernel.cu index 43cf76a477d5e4..df9fdc5a18d292 100644 --- a/paddle/phi/kernels/gpu/group_norm_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/group_norm_grad_kernel.cu @@ -325,7 +325,7 @@ void GroupNormGradKernel(const Context& dev_ctx, return; } using AccT = typename phi::dtype::MPTypeTrait::Type; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const auto scale_ptr = scale.get_ptr(); const auto bias_ptr = bias.get_ptr(); diff --git a/paddle/phi/kernels/gpu/group_norm_kernel.cu b/paddle/phi/kernels/gpu/group_norm_kernel.cu index c25a8ead4c3b72..6f11910d5fbd7a 100644 --- a/paddle/phi/kernels/gpu/group_norm_kernel.cu +++ b/paddle/phi/kernels/gpu/group_norm_kernel.cu @@ -727,7 +727,7 @@ void GroupNormNDHWCKernel(const Context& dev_ctx, DenseTensor* residual_out, DenseTensor* mean, DenseTensor* var) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); if (data_layout != DataLayout::NHWC) { PD_THROW("data_layout only supports NHWC and NDHWC"); } @@ -1123,7 +1123,7 @@ void GroupNormGeneralCaseKernel(const Context& dev_ctx, DenseTensor* mean, DenseTensor* var) { using AccT = typename phi::dtype::MPTypeTrait::Type; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const auto scale_ptr = scale.get_ptr(); const auto bias_ptr = bias.get_ptr(); const auto x_dims = x.dims(); diff --git 
a/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu b/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu index 09868db6f9b275..1c3eeb9ffb31ee 100644 --- a/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu @@ -1208,7 +1208,7 @@ static void Interpolate1DCUDABwd( bool align_corners, int align_mode, DenseTensor* input_grad) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -1317,7 +1317,7 @@ static void Interpolate2DCUDABwd( bool align_corners, int align_mode, DenseTensor* input_grad) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -1591,7 +1591,7 @@ static void InterpolateAA2DCUDABwd( dev_ctx.template Alloc(input_grad); return; } - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -1812,7 +1812,7 @@ static void Interpolate3DCUDABwd( bool align_corners, int align_mode, DenseTensor* input_grad) { - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); diff --git a/paddle/phi/kernels/gpu/interpolate_kernel.cu b/paddle/phi/kernels/gpu/interpolate_kernel.cu index 4da595d4f1d9d6..550d9658222fbf 100644 --- a/paddle/phi/kernels/gpu/interpolate_kernel.cu +++ b/paddle/phi/kernels/gpu/interpolate_kernel.cu @@ -1010,7 +1010,7 @@ static void Interpolate1DCUDAFwd( DenseTensor* output) { auto* input_data = input.data(); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -1121,7 +1121,7 @@ static void Interpolate2DCUDAFwd( DenseTensor* output) { auto* input_data = input.data(); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -1367,7 +1367,7 @@ static void InterpolateAA2DCUDAFwd( } auto* input_data = input.data(); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); @@ -1601,7 +1601,7 @@ static void Interpolate3DCUDAFwd( DenseTensor* output) { auto* input_data = input.data(); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); diff --git a/paddle/phi/kernels/gpu/sync_batch_norm_kernel.cu 
b/paddle/phi/kernels/gpu/sync_batch_norm_kernel.cu index e5023b47d63ffe..4f64e05247481b 100644 --- a/paddle/phi/kernels/gpu/sync_batch_norm_kernel.cu +++ b/paddle/phi/kernels/gpu/sync_batch_norm_kernel.cu @@ -48,7 +48,7 @@ void SyncBatchNormKernel(const Context& dev_ctx, double epsilon = epsilon_f; const bool trainable_stats = trainable_statistics; - const DataLayout layout = common::StringToDataLayout(data_layout_str); + const DataLayout layout = StringToDataLayout(data_layout_str); bool test_mode = is_test && (!trainable_statistics); const auto& x_dims = x.dims(); PADDLE_ENFORCE_GE(x_dims.size(), diff --git a/paddle/phi/kernels/gpu/temporal_shift_grad_kernel.cu b/paddle/phi/kernels/gpu/temporal_shift_grad_kernel.cu index 994ccf5e2e3f2f..c7f458d085bda3 100644 --- a/paddle/phi/kernels/gpu/temporal_shift_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/temporal_shift_grad_kernel.cu @@ -102,7 +102,7 @@ void TemporalShiftGradKernel(const Context& dev_ctx, auto* input_grad = x_grad; auto* output_grad = &out_grad; int t = seg_num; - const DataLayout data_layout = common::StringToDataLayout(data_format_str); + const DataLayout data_layout = StringToDataLayout(data_format_str); const int64_t nt = output_grad->dims()[0]; const int64_t c = (data_layout == DataLayout::NCHW ? output_grad->dims()[1] diff --git a/paddle/phi/kernels/gpu/temporal_shift_kernel.cu b/paddle/phi/kernels/gpu/temporal_shift_kernel.cu index 6218fa6a92b0f8..dce86d09243a98 100644 --- a/paddle/phi/kernels/gpu/temporal_shift_kernel.cu +++ b/paddle/phi/kernels/gpu/temporal_shift_kernel.cu @@ -102,7 +102,7 @@ void TemporalShiftKernel(const Context& dev_ctx, auto* input = &x; auto* output = out; int t = seg_num; - const DataLayout data_layout = common::StringToDataLayout(data_format_str); + const DataLayout data_layout = StringToDataLayout(data_format_str); const int64_t nt = input->dims()[0]; const int64_t c = diff --git a/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h b/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h index f133c34ad1f96f..6c743ed4eeef73 100644 --- a/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h @@ -40,7 +40,7 @@ void ConvTransposeGradRawKernel(const Context& dev_ctx, const std::string& data_format, DenseTensor* dx, DenseTensor* dfilter) { - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); // For filter, we do not use const pointer because we will do reshape, // but we should avoid modifying its value. 
DenseTensor filter_ = filter; diff --git a/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h b/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h index 0af28152669713..96f178dcbbdb25 100644 --- a/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h +++ b/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h @@ -43,7 +43,7 @@ void ConvTransposeRawKernel(const Context& dev_ctx, dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out); return; } - const DataLayout data_layout = common::StringToDataLayout(data_format); + const DataLayout data_layout = StringToDataLayout(data_format); // The filter will be reshaped, so it should not be constant DenseTensor filter_ = filter; std::vector paddings_ = paddings; diff --git a/paddle/phi/kernels/impl/lrn_kernel_impl.h b/paddle/phi/kernels/impl/lrn_kernel_impl.h index 01411d6ece35cd..92b41c5c9c27df 100644 --- a/paddle/phi/kernels/impl/lrn_kernel_impl.h +++ b/paddle/phi/kernels/impl/lrn_kernel_impl.h @@ -57,7 +57,7 @@ void LRNKernel(const Context& dev_ctx, auto x_dims = x.dims(); const std::string data_layout_str = data_format; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); // NCHW int64_t N = x_dims[0]; int64_t C = (data_layout != DataLayout::NHWC ? x_dims[1] : x_dims[3]); @@ -145,7 +145,7 @@ void LRNGradKernel(const Context& dev_ctx, const DenseTensor& out_g = out_grad; const DenseTensor& mid = mid_out; const std::string data_layout_str = data_format; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); auto x_g = x_grad; dev_ctx.template Alloc(x_g); diff --git a/paddle/phi/kernels/onednn/conv_grad_kernel.cc b/paddle/phi/kernels/onednn/conv_grad_kernel.cc index 5d1c04b14637cd..759d6f578e720b 100644 --- a/paddle/phi/kernels/onednn/conv_grad_kernel.cc +++ b/paddle/phi/kernels/onednn/conv_grad_kernel.cc @@ -240,7 +240,7 @@ KernelKey ConvGradGetKernelTypeForVar(const GetKernelTypeForVarContext* ctx) { (tensor.layout() != DataLayout::ONEDNN)) { auto it = attrs.find("data_format"); const std::string data_format = PADDLE_GET_CONST(std::string, it->second); - auto dl = common::StringToDataLayout(data_format); + auto dl = StringToDataLayout(data_format); // Some models may have intentionally set "AnyLayout" for pool // op. Treat this as NCHW (default data_format value) if (dl != DataLayout::ANY) { diff --git a/paddle/phi/kernels/onednn/conv_kernel.cc b/paddle/phi/kernels/onednn/conv_kernel.cc index 4cb65603e24f3a..94be9c33f26697 100644 --- a/paddle/phi/kernels/onednn/conv_kernel.cc +++ b/paddle/phi/kernels/onednn/conv_kernel.cc @@ -130,7 +130,7 @@ KernelKey ConvGetKernelTypeForVar(const GetKernelTypeForVarContext* ctx) { (tensor.layout() != DataLayout::ONEDNN)) { auto it = attrs.find("data_format"); const std::string data_format = PADDLE_GET_CONST(std::string, it->second); - auto dl = common::StringToDataLayout(data_format); + auto dl = StringToDataLayout(data_format); // Some models may have intentionally set "AnyLayout" for conv // op. 
Treat this as NCHW (default data_format value) if (dl != DataLayout::ANY) { diff --git a/paddle/phi/kernels/onednn/conv_transpose_kernel.cc b/paddle/phi/kernels/onednn/conv_transpose_kernel.cc index d3f3692a4e70b5..f83309ccf81742 100644 --- a/paddle/phi/kernels/onednn/conv_transpose_kernel.cc +++ b/paddle/phi/kernels/onednn/conv_transpose_kernel.cc @@ -612,7 +612,7 @@ KernelKey ConvTransposeGetKernelTypeForVar( (tensor.layout() != DataLayout::ONEDNN)) { auto it = attrs.find("data_format"); const std::string data_format = PADDLE_GET_CONST(std::string, it->second); - auto dl = common::StringToDataLayout(data_format); + auto dl = StringToDataLayout(data_format); // Some models may have intentionally set "AnyLayout" for pool // op. Treat this as NCHW (default data_format value) if (dl != DataLayout::ANY) { diff --git a/paddle/phi/kernels/onednn/interpolate_kernel.cc b/paddle/phi/kernels/onednn/interpolate_kernel.cc index 9f6a14fe98fcbd..d278b0f4e3e4e9 100644 --- a/paddle/phi/kernels/onednn/interpolate_kernel.cc +++ b/paddle/phi/kernels/onednn/interpolate_kernel.cc @@ -33,7 +33,7 @@ KernelKey InterpolateGetKernelTypeForVar( (tensor.layout() != DataLayout::ONEDNN)) { auto it = attrs.find("data_layout"); const std::string data_layout = PADDLE_GET_CONST(std::string, it->second); - auto dl = common::StringToDataLayout(data_layout); + auto dl = StringToDataLayout(data_layout); // Some models may have intentionally set "AnyLayout" for pool // op. Treat this as NCHW (default data_format value) if (dl != DataLayout::ANY) { diff --git a/paddle/phi/kernels/onednn/pad3d_kernel.cc b/paddle/phi/kernels/onednn/pad3d_kernel.cc index c95eaafe7670f7..7d87179bfb21dc 100644 --- a/paddle/phi/kernels/onednn/pad3d_kernel.cc +++ b/paddle/phi/kernels/onednn/pad3d_kernel.cc @@ -30,7 +30,7 @@ KernelKey Pad3dGetKernelTypeForVar(const GetKernelTypeForVarContext* dev_ctx) { auto it = attrs.find("data_format"); const std::string data_format = PADDLE_GET_CONST(std::string, it->second); return phi::KernelKey(tensor.place(), - common::StringToDataLayout(data_format), + StringToDataLayout(data_format), expected_kernel_type.dtype()); } #endif diff --git a/paddle/phi/kernels/onednn/pool_grad_kernel.cc b/paddle/phi/kernels/onednn/pool_grad_kernel.cc index 7ddbba9b369184..0e328a2f8e529c 100644 --- a/paddle/phi/kernels/onednn/pool_grad_kernel.cc +++ b/paddle/phi/kernels/onednn/pool_grad_kernel.cc @@ -98,7 +98,7 @@ phi::KernelKey PoolOpGradGetKernelTypeForVar( auto it = attrs.find("data_format"); const std::string data_format = PADDLE_GET_CONST(std::string, it->second); return phi::KernelKey(tensor.place(), - common::StringToDataLayout(data_format), + StringToDataLayout(data_format), expected_kernel_type.dtype()); } #endif diff --git a/paddle/phi/kernels/onednn/pool_kernel.cc b/paddle/phi/kernels/onednn/pool_kernel.cc index 80061b82d1d2d6..884046294d4bc4 100644 --- a/paddle/phi/kernels/onednn/pool_kernel.cc +++ b/paddle/phi/kernels/onednn/pool_kernel.cc @@ -96,7 +96,7 @@ phi::KernelKey PoolOpGetKernelTypeForVar( const AttributeMap& attrs = dev_ctx->GetAttrs(); auto it = attrs.find("data_format"); const std::string data_format = PADDLE_GET_CONST(std::string, it->second); - auto dl = common::StringToDataLayout(data_format); + auto dl = StringToDataLayout(data_format); // Some models may have intentionally set "AnyLayout" for pool // op. 
Treat this as NCHW (default data_format value) if (dl != DataLayout::ANY) { diff --git a/paddle/phi/kernels/xpu/affine_channel_grad_kernel.cc b/paddle/phi/kernels/xpu/affine_channel_grad_kernel.cc index b0a9cdbece78e0..b28188ee5a3fdd 100644 --- a/paddle/phi/kernels/xpu/affine_channel_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/affine_channel_grad_kernel.cc @@ -40,7 +40,7 @@ void AffineChannelGradXPUKernel(const Context& dev_ctx, auto* dscale = scale_grad; auto* dbias = bias_grad; - const DataLayout layout = common::StringToDataLayout(data_layout); + const DataLayout layout = StringToDataLayout(data_layout); auto dims = x->dims(); int64_t N = dims[0]; diff --git a/paddle/phi/kernels/xpu/batch_norm_grad_kernel.cc b/paddle/phi/kernels/xpu/batch_norm_grad_kernel.cc index 8cd35872a67adf..633609819ea27c 100644 --- a/paddle/phi/kernels/xpu/batch_norm_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/batch_norm_grad_kernel.cc @@ -114,7 +114,7 @@ void BatchNormGradKernel(const Context &dev_ctx, "But received 'data_layout' is [%s].", data_layout)); - const auto data_layout_val = common::StringToDataLayout(data_layout); + const auto data_layout_val = StringToDataLayout(data_layout); use_global_stats = is_test || use_global_stats; diff --git a/paddle/phi/kernels/xpu/batch_norm_kernel.cc b/paddle/phi/kernels/xpu/batch_norm_kernel.cc index 0dcc5a6cf82073..c457b146efd141 100644 --- a/paddle/phi/kernels/xpu/batch_norm_kernel.cc +++ b/paddle/phi/kernels/xpu/batch_norm_kernel.cc @@ -55,7 +55,7 @@ void BatchNormKernel(const Context& dev_ctx, using XPUType = typename XPUTypeTrait::Type; bool test_mode = is_test && (!trainable_statistics); bool global_stats = test_mode || use_global_stats; - const auto data_layout = common::StringToDataLayout(data_layout_str); + const auto data_layout = StringToDataLayout(data_layout_str); PADDLE_ENFORCE_EQ(data_layout_str == "NCHW" || data_layout_str == "NHWC", true, common::errors::InvalidArgument( diff --git a/paddle/phi/kernels/xpu/grid_sample_kernel.cc b/paddle/phi/kernels/xpu/grid_sample_kernel.cc index d05f4f988fabb6..08fc51abf7c24d 100644 --- a/paddle/phi/kernels/xpu/grid_sample_kernel.cc +++ b/paddle/phi/kernels/xpu/grid_sample_kernel.cc @@ -35,7 +35,7 @@ void GridSampleKernel(const Context& dev_ctx, // attrs // paddle.nn.functional.grid_sample(x, grid, mode='bilinear', // padding_mode='zeros', align_corners=True, name=None) - const std::string data_format = common::DataLayoutToString(x.layout()); + const std::string data_format = DataLayoutToString(x.layout()); // attr to real param bool is_nearest_bool; diff --git a/paddle/phi/kernels/xpu/group_norm_grad_kernel.cc b/paddle/phi/kernels/xpu/group_norm_grad_kernel.cc index 1aeb7ab6273acc..37a1b3e189c4d6 100644 --- a/paddle/phi/kernels/xpu/group_norm_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/group_norm_grad_kernel.cc @@ -68,7 +68,7 @@ void GroupNormGradKernel(const Context& dev_ctx, using XPUType = typename XPUTypeTrait::Type; xpu::ctx_guard RAII_GUARD(dev_ctx.x_context()); int ret = 0; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const auto scale_ptr = scale.get_ptr(); const auto bias_ptr = bias.get_ptr(); const auto x_dims = common::vectorize(x.dims()); diff --git a/paddle/phi/kernels/xpu/group_norm_kernel.cc b/paddle/phi/kernels/xpu/group_norm_kernel.cc index 72319aefb828d3..531f3b8591ce2a 100644 --- a/paddle/phi/kernels/xpu/group_norm_kernel.cc +++ b/paddle/phi/kernels/xpu/group_norm_kernel.cc @@ -51,7 +51,7 @@ void 
GroupNormKernel(const Context& dev_ctx, } using XPUType = typename XPUTypeTrait::Type; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); const auto scale_ptr = scale.get_ptr(); const auto bias_ptr = bias.get_ptr(); diff --git a/paddle/phi/kernels/xpu/interpolate_grad_kernel.cc b/paddle/phi/kernels/xpu/interpolate_grad_kernel.cc index 27ea33ccd0cdf0..ece5eeca9ba6af 100644 --- a/paddle/phi/kernels/xpu/interpolate_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/interpolate_grad_kernel.cc @@ -43,7 +43,7 @@ void InterpolateGradKernel( dev_ctx.template Alloc(x_grad); return; } - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); diff --git a/paddle/phi/kernels/xpu/interpolate_kernel.cc b/paddle/phi/kernels/xpu/interpolate_kernel.cc index d655b03a035bb6..87fc58025ae6e6 100644 --- a/paddle/phi/kernels/xpu/interpolate_kernel.cc +++ b/paddle/phi/kernels/xpu/interpolate_kernel.cc @@ -43,7 +43,7 @@ void InterpolateKernel( return; } using XPUType = typename XPUTypeTrait::Type; - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + const DataLayout data_layout = StringToDataLayout(data_layout_str); int64_t n, c, in_d, in_h, in_w; funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); diff --git a/paddle/phi/kernels/xpu/temporal_shift_grad_kernel.cc b/paddle/phi/kernels/xpu/temporal_shift_grad_kernel.cc index 81b1068f598e2e..abae74903c8b27 100644 --- a/paddle/phi/kernels/xpu/temporal_shift_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/temporal_shift_grad_kernel.cc @@ -35,7 +35,7 @@ void TemporalShiftGradKernel(const Context& dev_ctx, auto* input_grad = x_grad; auto* output_grad = &out_grad; int64_t t = seg_num; - const DataLayout data_layout = common::StringToDataLayout(data_format_str); + const DataLayout data_layout = StringToDataLayout(data_format_str); const int64_t nt = output_grad->dims()[0]; const int64_t n = nt / t; diff --git a/paddle/phi/kernels/xpu/temporal_shift_kernel.cc b/paddle/phi/kernels/xpu/temporal_shift_kernel.cc index 9a0490381ac1ba..9ac64b1126b50e 100644 --- a/paddle/phi/kernels/xpu/temporal_shift_kernel.cc +++ b/paddle/phi/kernels/xpu/temporal_shift_kernel.cc @@ -35,7 +35,7 @@ void TemporalShiftKernel(const Context& dev_ctx, auto* input = &x; auto* output = out; int64_t t = seg_num; - const DataLayout data_layout = common::StringToDataLayout(data_format_str); + const DataLayout data_layout = StringToDataLayout(data_format_str); const int64_t nt = input->dims()[0]; const int64_t n = nt / t; diff --git a/paddle/phi/kernels/xpu/unfold_grad_kernel.cc b/paddle/phi/kernels/xpu/unfold_grad_kernel.cc index 58f576433cee58..3195c7840a25ad 100644 --- a/paddle/phi/kernels/xpu/unfold_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/unfold_grad_kernel.cc @@ -34,7 +34,7 @@ void UnfoldGradKernel(const Context& dev_ctx, if (x_grad->numel() == 0) { return; } - const std::string data_format = common::DataLayoutToString(x.layout()); + const std::string data_format = DataLayoutToString(x.layout()); bool is_nchw = data_format == "NCHW"; PADDLE_ENFORCE_EQ(is_nchw, true, diff --git a/paddle/phi/kernels/xpu/unfold_kernel.cc b/paddle/phi/kernels/xpu/unfold_kernel.cc index c04f4813c29963..6d7916c1def0f8 100644 --- a/paddle/phi/kernels/xpu/unfold_kernel.cc +++ 
b/paddle/phi/kernels/xpu/unfold_kernel.cc @@ -33,7 +33,7 @@ void UnfoldKernel(const Context& dev_ctx, if (out->numel() == 0) { return; } - const std::string data_format = common::DataLayoutToString(x.layout()); + const std::string data_format = DataLayoutToString(x.layout()); bool is_nchw = data_format == "NCHW"; PADDLE_ENFORCE_EQ(is_nchw, true,
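
The change to paddle/common/layout.h in patch 1/2 is what lets every kernel edit in patch 2/2 drop the common:: qualifier: the added using-declarations re-export common::StringToDataLayout and common::DataLayoutToString into namespace phi, so unqualified calls made from inside phi resolve to the same functions as before. The following self-contained C++ sketch illustrates that mechanism only; the enum values and string mappings are simplified stand-ins for the real declarations in paddle/common/layout.h, not the actual Paddle headers.

#include <cassert>
#include <string>

// Simplified stand-in for paddle/common/layout.h.
namespace common {
enum class DataLayout { NCHW, NHWC, ANY };

inline DataLayout StringToDataLayout(const std::string& s) {
  if (s == "NHWC") return DataLayout::NHWC;
  if (s == "NCHW") return DataLayout::NCHW;
  return DataLayout::ANY;
}

inline std::string DataLayoutToString(DataLayout layout) {
  switch (layout) {
    case DataLayout::NCHW: return "NCHW";
    case DataLayout::NHWC: return "NHWC";
    default:               return "ANY";
  }
}
}  // namespace common

namespace phi {
using DataLayout = common::DataLayout;
// The using-declarations added by patch 1/2: they make the common::
// functions visible as phi::StringToDataLayout / phi::DataLayoutToString,
// so code inside namespace phi can call them without qualification.
using common::DataLayoutToString;
using common::StringToDataLayout;
}  // namespace phi

namespace phi {
// Mirrors the kernel-side edits in patch 2/2: the common:: prefix is gone,
// but name lookup still finds common::StringToDataLayout via the re-export.
inline bool IsChannelLast(const std::string& data_layout_str) {
  const DataLayout layout = StringToDataLayout(data_layout_str);
  return layout == DataLayout::NHWC;
}
}  // namespace phi

int main() {
  assert(phi::IsChannelLast("NHWC"));
  assert(phi::DataLayoutToString(phi::StringToDataLayout("NCHW")) == "NCHW");
  return 0;
}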