Merged
4 changes: 3 additions & 1 deletion paddle/common/layout.h
@@ -144,7 +144,9 @@ using DataLayout = common::DataLayout;

namespace phi {
using DataLayout = common::DataLayout;
-}
+using common::DataLayoutToString;
+using common::StringToDataLayout;
+} // namespace phi

namespace paddle {
// In order to be compatible with the original custom operator Tensor interface
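The hunk above is what makes the rest of this PR possible: re-exporting `common::DataLayoutToString` and `common::StringToDataLayout` into `namespace phi` lets every kernel under `paddle/phi` call them without the `common::` qualifier. Below is a minimal, self-contained sketch of that mechanism; the enum values and function body are simplified stand-ins, not the real declarations from `paddle/common/layout.h`.

```cpp
#include <string>

namespace common {
enum class DataLayout { kNCHW, kNHWC, UNDEFINED };

// Stand-in for the real converter declared in common.
inline DataLayout StringToDataLayout(const std::string& s) {
  if (s == "NCHW") return DataLayout::kNCHW;
  if (s == "NHWC") return DataLayout::kNHWC;
  return DataLayout::UNDEFINED;
}
}  // namespace common

namespace phi {
using DataLayout = common::DataLayout;  // pre-existing alias
using common::StringToDataLayout;       // what this PR adds

// Kernel code inside phi can now drop the common:: qualifier, matching the
// one-line edits in the kernel files below.
inline DataLayout Example(const std::string& fmt) {
  return StringToDataLayout(fmt);
}
}  // namespace phi

int main() { return phi::Example("NCHW") == phi::DataLayout::kNCHW ? 0 : 1; }
```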
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/affine_channel_grad_kernel.cc
@@ -50,7 +50,7 @@ void AffineChannelGradKernel(const Context& dev_ctx,
auto* dscale = scale_grad;
auto* dbias = bias_grad;

-const DataLayout layout = common::StringToDataLayout(data_layout);
+const DataLayout layout = StringToDataLayout(data_layout);

auto dims = x->dims();
int N = static_cast<int>(dims[0]);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/affine_channel_kernel.cc
@@ -46,7 +46,7 @@ void AffineChannelKernel(const Context& dev_ctx,
auto* y = out;
dev_ctx.template Alloc<T>(y);

-const DataLayout layout = common::StringToDataLayout(data_layout);
+const DataLayout layout = StringToDataLayout(data_layout);

auto dims = x->dims();
int N = static_cast<int>(dims[0]);
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/batch_norm_grad_kernel.cc
@@ -59,7 +59,7 @@ void BatchNormGradFunctor(const Context& dev_ctx,
DenseTensor* bias_grad) {
const auto* d_y = &y_grad;

-DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+DataLayout data_layout = StringToDataLayout(data_layout_str);

auto* d_x = x_grad;
auto* d_scale = scale_grad;
@@ -391,7 +391,7 @@ void BatchNormDoubleGradKernel(
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));

-const auto data_layout = common::StringToDataLayout(data_layout_str);
+const auto data_layout = StringToDataLayout(data_layout_str);

const auto* ddX = x_grad_grad.get_ptr();
const auto* ddScale = scale_grad_grad.get_ptr();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/batch_norm_kernel.cc
@@ -68,7 +68,7 @@ void BatchNormKernel(const Context& dev_ctx,

bool global_stats = test_mode || use_global_stats;

-auto data_layout = common::StringToDataLayout(data_layout_str);
+auto data_layout = StringToDataLayout(data_layout_str);

const auto& x_dims = x.dims();
PADDLE_ENFORCE_GE(
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/group_norm_grad_kernel.cc
@@ -60,7 +60,7 @@ void GroupNormGradKernel(const Context& dev_ctx,
}
return;
}
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const auto& x_dims = y.dims();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/group_norm_kernel.cc
@@ -52,7 +52,7 @@ void GroupNormKernel(const Context& dev_ctx,
}
return;
}
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();

6 changes: 3 additions & 3 deletions paddle/phi/kernels/cpu/interpolate_grad_kernel.cc
@@ -406,7 +406,7 @@ static void Interpolate1DCPUBwd(
bool align_corners,
int align_mode,
DenseTensor* input_grad) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -507,7 +507,7 @@ static void Interpolate2DCPUBwd(
bool align_corners,
int align_mode,
DenseTensor* input_grad) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -664,7 +664,7 @@ static void Interpolate3DCPUBwd(
bool align_corners,
int align_mode,
DenseTensor* input_grad) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

6 changes: 3 additions & 3 deletions paddle/phi/kernels/cpu/interpolate_kernel.cc
@@ -557,7 +557,7 @@ static void Interpolate1DCPUFwd(
bool align_corners,
int align_mode,
DenseTensor* output) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0;
funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -650,7 +650,7 @@ static void Interpolate2DCPUFwd(
bool align_corners,
int align_mode,
DenseTensor* output) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0;
funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -813,7 +813,7 @@ static void Interpolate3DCPUFwd(
bool align_corners,
int align_mode,
DenseTensor* output) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n = 0, c = 0, in_d = 0, in_h = 0, in_w = 0;
funcs::ExtractNCDWH(x.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/temporal_shift_grad_kernel.cc
@@ -96,7 +96,7 @@ void TemporalShiftGradKernel(const Context& dev_ctx,
auto* input_grad = x_grad;
auto* output_grad = &out_grad;
int t = seg_num;
-const DataLayout data_layout = common::StringToDataLayout(data_format_str);
+const DataLayout data_layout = StringToDataLayout(data_format_str);

const int nt = static_cast<int>(output_grad->dims()[0]);
const int c = static_cast<int>(data_layout == DataLayout::NCHW
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/temporal_shift_kernel.cc
@@ -96,7 +96,7 @@ void TemporalShiftKernel(const Context& dev_ctx,
auto* input = &x;
auto* output = out;
int t = seg_num;
-const DataLayout data_layout = common::StringToDataLayout(data_format_str);
+const DataLayout data_layout = StringToDataLayout(data_format_str);

const int nt = static_cast<int>(input->dims()[0]);
const int c = static_cast<int>(
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/data_layout_transform.h
@@ -44,7 +44,7 @@ inline OneDNNMemoryFormat ToOneDNNFormat(const DataLayout& layout) {
default:
PADDLE_THROW(
errors::InvalidArgument("Fail to convert layout %s to oneDNN format.",
-::common::DataLayoutToString(layout)));
+DataLayoutToString(layout)));
}
}

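The `data_layout_transform.h` hunk above drops a fully qualified `::common::` call as well. Assuming that header's contents live in a nested namespace such as `phi::funcs` (as its `kernels/funcs` path suggests), the unqualified call still resolves: unqualified lookup walks out to the enclosing `phi` namespace and finds the new using-declaration, and argument-dependent lookup on the `common::DataLayout` argument would find the original anyway. A hedged sketch with simplified stand-in names:

```cpp
#include <string>

namespace common {
enum class DataLayout { kNCHW, kNHWC };

// Stand-in for the real converter declared in common.
inline std::string DataLayoutToString(const DataLayout& layout) {
  return layout == DataLayout::kNCHW ? "NCHW" : "NHWC";
}
}  // namespace common

namespace phi {
using common::DataLayoutToString;  // re-exported into phi, as in layout.h above

namespace funcs {
inline std::string Describe(common::DataLayout layout) {
  // Unqualified call: lookup walks out of phi::funcs into phi and finds the
  // using-declaration; ADL on common::DataLayout would also locate it.
  return DataLayoutToString(layout);
}
}  // namespace funcs
}  // namespace phi

int main() {
  return phi::funcs::Describe(common::DataLayout::kNHWC) == "NHWC" ? 0 : 1;
}
```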
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/sync_batch_norm_utils.h
@@ -425,7 +425,7 @@ void SyncBatchNormGradFunctor(
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);

-const DataLayout layout = common::StringToDataLayout(data_layout_str);
+const DataLayout layout = StringToDataLayout(data_layout_str);

const auto *d_y = &y_grad;

2 changes: 1 addition & 1 deletion paddle/phi/kernels/fusion/onednn/fused_conv_kernel.cc
@@ -144,7 +144,7 @@ KernelKey ConvGetKernelTypeForVar(const GetKernelTypeForVarContext* ctx) {
(tensor.layout() != DataLayout::ONEDNN)) {
auto it = attrs.find("data_format");
const std::string data_format = PADDLE_GET_CONST(std::string, it->second);
-auto dl = common::StringToDataLayout(data_format);
+auto dl = StringToDataLayout(data_format);
// Some models may have intentionally set "AnyLayout" for conv
// op. Treat this as NCHW (default data_format value)
if (dl != DataLayout::ANY) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/fusion/xpu/bn_act_xpu_kernel.cc
@@ -33,7 +33,7 @@ void BNActXPUKernel(const Context& dev_ctx,
int act_type,
DenseTensor* y) {
using XPUType = typename XPUTypeTrait<T>::Type;
-const auto data_layout = common::StringToDataLayout(data_layout_str);
+const auto data_layout = StringToDataLayout(data_layout_str);
PADDLE_ENFORCE_EQ(data_layout_str == "NCHW" || data_layout_str == "NHWC",
true,
common::errors::InvalidArgument(
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/affine_channel_grad_kernel.cu
@@ -98,7 +98,7 @@ void AffineChannelGradCUDAKernel(const Context& dev_ctx,
auto* dscale = scale_grad;
auto* dbias = bias_grad;

-const DataLayout layout = common::StringToDataLayout(data_layout);
+const DataLayout layout = StringToDataLayout(data_layout);

auto dims = dy->dims();
const int64_t num = dy->numel();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/affine_channel_kernel.cu
@@ -56,7 +56,7 @@ void AffineChannelCUDAKernel(const Context& dev_ctx,
auto* y = out;
dev_ctx.template Alloc<T>(y);

-const DataLayout layout = common::StringToDataLayout(data_layout);
+const DataLayout layout = StringToDataLayout(data_layout);

auto dims = x->dims();
const int64_t num = x->numel();
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
@@ -533,7 +533,7 @@ void BatchNormGradFunctor(const Context &dev_ctx,
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);

-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);

const auto *d_y = &y_grad;

@@ -1407,7 +1407,7 @@ void BatchNormDoubleGradKernel(
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));

-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);

const DenseTensor *running_mean = nullptr;
const DenseTensor *running_variance = nullptr;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/batch_norm_kernel.cu
@@ -570,7 +570,7 @@ void BatchNormKernel(const Context &dev_ctx,
}
double epsilon = epsilon_f;
const bool trainable_stats = trainable_statistics;
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
bool test_mode = is_test && (!trainable_stats);

// Get the size for each dimension.
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu
@@ -72,7 +72,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& dev_ctx,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
DenseTensor filter_ = filter;

if (!dx && !dfilter) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/conv_transpose_kernel.cu
@@ -41,7 +41,7 @@ void DepthwiseConv2dTransposeKernel(const Context& dev_ctx,
Full<T, Context>(dev_ctx, out->dims(), 0, out);
return;
}
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
DenseTensor filter_ = filter;
dev_ctx.template Alloc<T>(out);

@@ -478,7 +478,7 @@ void DepthwiseConv2dBiasGradKernel(const Context& dev_ctx,
auto filter_dims = filter.dims();

DDim in_data_dims;
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
if (data_layout != DataLayout::NHWC) {
in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
} else {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/depthwise_conv2d_bias_kernel.cu
@@ -383,7 +383,7 @@ void DepthwiseConv2dBiasKernel(const Context& dev_ctx,
auto filter_dims = filter.dims();

DDim in_data_dims;
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
if (data_layout != DataLayout::NHWC) {
in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
} else {
@@ -657,7 +657,7 @@ void DepthwiseConv3dBiasGradKernel(const Context& dev_ctx,
auto filter_dims = filter.dims();

DDim in_data_dims;
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
if (data_layout != DataLayout::NDHWC) {
in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
} else {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/depthwise_conv3d_bias_kernel.cu
@@ -328,7 +328,7 @@ void DepthwiseConv3dBiasKernel(const Context& dev_ctx,
auto filter_dims = filter.dims();

DDim in_data_dims;
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
if (data_layout != DataLayout::NDHWC) {
in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
} else {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/depthwise_conv_grad_kernel.cu
@@ -95,7 +95,7 @@ void DepthwiseConvGradKernel(const Context& dev_ctx,
auto filter_dims = filter.dims();

DDim in_data_dims;
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
if (data_layout != DataLayout::NHWC) {
in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
} else {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/depthwise_conv_kernel.cu
@@ -99,7 +99,7 @@ void DepthwiseConvKernel(const Context& dev_ctx,
auto filter_dims = filter.dims();

DDim in_data_dims;
-const DataLayout data_layout = common::StringToDataLayout(data_format);
+const DataLayout data_layout = StringToDataLayout(data_format);
if (data_layout != DataLayout::NHWC) {
in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
} else {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/group_norm_grad_kernel.cu
@@ -318,7 +318,7 @@ void GroupNormGradKernel(const Context& dev_ctx,
return;
}
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();

4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/group_norm_kernel.cu
@@ -727,7 +727,7 @@ void GroupNormNDHWCKernel(const Context& dev_ctx,
DenseTensor* residual_out,
DenseTensor* mean,
DenseTensor* var) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
if (data_layout != DataLayout::NHWC) {
PD_THROW("data_layout only supports NHWC and NDHWC");
}
@@ -1123,7 +1123,7 @@ void GroupNormGeneralCaseKernel(const Context& dev_ctx,
DenseTensor* mean,
DenseTensor* var) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const auto x_dims = x.dims();
8 changes: 4 additions & 4 deletions paddle/phi/kernels/gpu/interpolate_grad_kernel.cu
@@ -1208,7 +1208,7 @@ static void Interpolate1DCUDABwd(
bool align_corners,
int align_mode,
DenseTensor* input_grad) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -1317,7 +1317,7 @@ static void Interpolate2DCUDABwd(
bool align_corners,
int align_mode,
DenseTensor* input_grad) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -1591,7 +1591,7 @@ static void InterpolateAA2DCUDABwd(
dev_ctx.template Alloc<T>(input_grad);
return;
}
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -1812,7 +1812,7 @@ static void Interpolate3DCUDABwd(
bool align_corners,
int align_mode,
DenseTensor* input_grad) {
-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

8 changes: 4 additions & 4 deletions paddle/phi/kernels/gpu/interpolate_kernel.cu
@@ -1010,7 +1010,7 @@ static void Interpolate1DCUDAFwd(
DenseTensor* output) {
auto* input_data = input.data<T>();

-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -1121,7 +1121,7 @@ static void Interpolate2DCUDAFwd(
DenseTensor* output) {
auto* input_data = input.data<T>();

-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -1367,7 +1367,7 @@ static void InterpolateAA2DCUDAFwd(
}
auto* input_data = input.data<T>();

-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

@@ -1601,7 +1601,7 @@ static void Interpolate3DCUDAFwd(
DenseTensor* output) {
auto* input_data = input.data<T>();

-const DataLayout data_layout = common::StringToDataLayout(data_layout_str);
+const DataLayout data_layout = StringToDataLayout(data_layout_str);
int64_t n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
