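This merged PR replaces the verbose zero-size-tensor fill pattern `phi::Full<T, Context>(dev_ctx, phi::IntArray(common::vectorize(t->dims())), value, t)` with the shorter `Full<T, Context>(dev_ctx, t->dims(), value, t)` across the GPUDNN and OneDNN kernels below. The shortened call sites can only compile if an overload or wrapper accepting a `DDim` directly is in scope; a minimal sketch of such a wrapper, assuming the `IntArray`-based `phi::Full` visible in the removed lines (illustration only, not code from this PR):

// Hedged sketch of the assumed convenience wrapper (not taken from the PR):
// it simply forwards a DDim to the existing IntArray-based phi::Full, so
// call sites no longer spell out phi::IntArray(common::vectorize(...)).
template <typename T, typename Context>
void Full(const Context& dev_ctx,
          const phi::DDim& dims,
          const phi::Scalar& value,
          phi::DenseTensor* out) {
  phi::Full<T, Context>(
      dev_ctx, phi::IntArray(common::vectorize(dims)), value, out);
}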
5 changes: 1 addition & 4 deletions paddle/phi/kernels/gpudnn/affine_grid_grad_kernel.cu
@@ -42,10 +42,7 @@ void AffineGridGradCudnnKernel(const Context& dev_ctx,
           "Only support for CUDAPlace.Please switch your context from "
           "CPUPlace to CUDAPlace or update your cudnn."));
   if (output_grad.numel() == 0 || input_grad->numel() == 0) {
-    phi::Full<T, Context>(dev_ctx,
-                          phi::IntArray(common::vectorize(input_grad->dims())),
-                          0,
-                          input_grad);
+    Full<T, Context>(dev_ctx, input_grad->dims(), 0, input_grad);
     return;
   }
   auto handle = dev_ctx.cudnn_handle();
3 changes: 1 addition & 2 deletions paddle/phi/kernels/gpudnn/affine_grid_kernel.cu
@@ -53,8 +53,7 @@ void AffineGridCudnnKernel(const Context& dev_ctx,
   output->Resize(common::make_ddim({n, h_size_data[2], h_size_data[3], 2}));
   T* output_data = dev_ctx.template Alloc<T>(output);
   if (input.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(output->dims())), 0, output);
+    Full<T, Context>(dev_ctx, output->dims(), 0, output);
     return;
   }
   ScopedSpatialTransformerDescriptor st_desc;
6 changes: 1 addition & 5 deletions paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
@@ -415,11 +415,7 @@ void ConvCudnnGradKernel(const Context& dev_ctx,
   if (input.numel() == 0 || filter.numel() == 0) {
     if (input_grad) dev_ctx.template Alloc<T>(input_grad);
     if (filter_grad) {
-      phi::Full<T, Context>(
-          dev_ctx,
-          phi::IntArray(common::vectorize(filter_grad->dims())),
-          0,
-          filter_grad);
+      Full<T, Context>(dev_ctx, filter_grad->dims(), 0, filter_grad);
     }
     return;
   }
3 changes: 1 addition & 2 deletions paddle/phi/kernels/gpudnn/conv_kernel.cu
@@ -306,8 +306,7 @@ void ConvCudnnKernel(const Context& dev_ctx,
                      const std::string& data_format,
                      DenseTensor* output) {
   if (input.numel() == 0 || filter.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(output->dims())), 0, output);
+    Full<T, Context>(dev_ctx, output->dims(), 0, output);
     return;
   }
   dev_ctx.template Alloc<T>(output);
8 changes: 2 additions & 6 deletions paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu
@@ -61,18 +61,14 @@ void ConvTransposeGradRawGPUDNNKernel(const Context& dev_ctx,
   if (x.numel() == 0) {
     if (dx) dev_ctx.template Alloc<T>(dx);
     if (dfilter) {
-      phi::Full<T, Context>(dev_ctx,
-                            phi::IntArray(common::vectorize(dfilter->dims())),
-                            0,
-                            dfilter);
+      Full<T, Context>(dev_ctx, dfilter->dims(), 0, dfilter);
     }
     return;
   }
   if (filter.numel() == 0) {
     if (dfilter) dev_ctx.template Alloc<T>(dfilter);
     if (dx) {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
+      Full<T, Context>(dev_ctx, dx->dims(), 0, dx);
     }
     return;
   }
3 changes: 1 addition & 2 deletions paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu
@@ -251,8 +251,7 @@ void ConvTransposeRawGPUDNNKernel(const Context& dev_ctx,
                                   const std::string& data_format,
                                   DenseTensor* out) {
   if (x.numel() == 0 || filter.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
+    Full<T, Context>(dev_ctx, out->dims(), 0, out);
     return;
   }
 
15 changes: 5 additions & 10 deletions paddle/phi/kernels/gpudnn/pool_kernel.cu
@@ -38,8 +38,7 @@ void PoolRawGPUDNNKernel(const Context& dev_ctx,
                          const std::string& padding_algorithm,
                          DenseTensor* out) {
   if (x.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(out->dims())), NAN, out);
+    Full<T, Context>(dev_ctx, out->dims(), NAN, out);
     return;
   }
   PADDLE_ENFORCE_EQ(
@@ -268,11 +267,9 @@ void Pool2dGPUDNNKernel(const Context& dev_ctx,
                         DenseTensor* out) {
   if (x.numel() == 0) {
     if (pooling_type == "max") {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
+      Full<T, Context>(dev_ctx, out->dims(), 0, out);
     } else { // for pooling_type == "avg"
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), NAN, out);
+      Full<T, Context>(dev_ctx, out->dims(), NAN, out);
     }
     return;
   }
@@ -306,11 +303,9 @@ void Pool3dGPUDNNKernel(const Context& dev_ctx,
                         DenseTensor* out) {
   if (x.numel() == 0) {
     if (pooling_type == "max" || (!adaptive && pooling_type == "avg")) {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
+      Full<T, Context>(dev_ctx, out->dims(), 0, out);
     } else {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), NAN, out);
+      Full<T, Context>(dev_ctx, out->dims(), NAN, out);
     }
     return;
   }
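A note on the fill values in the pooling kernels above: a max pool over a zero-size input is filled with 0, while an average pool is filled with NAN, which matches the arithmetic of an empty mean being 0/0. A standalone illustration of that reasoning (my example, not part of the PR):

#include <cmath>
#include <cstdio>

int main() {
  // The mean of zero elements is 0/0, which is NaN under IEEE-754 --
  // consistent with the NAN fill used for "avg" pooling (and for
  // MeanRawKernel below) when the input tensor has no elements.
  double sum = 0.0;
  double count = 0.0;
  double avg = sum / count;
  std::printf("mean over empty set: %f (isnan = %d)\n",
              avg, (int)std::isnan(avg));
  return 0;
}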
6 changes: 2 additions & 4 deletions paddle/phi/kernels/onednn/elementwise_grad_kernel.cc
@@ -126,15 +126,13 @@ void ElementwiseGradKernel(const OneDNNContext& dev_ctx,
     if (dx) {
       dev_ctx.template Alloc<T>(dx);
       if (dx->numel() != 0) {
-        phi::Full<T, OneDNNContext>(
-            dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
+        Full<T, OneDNNContext>(dev_ctx, dx->dims(), 0, dx);
       }
     }
     if (dy) {
       dev_ctx.template Alloc<T>(dy);
       if (dy->numel() != 0) {
-        phi::Full<T, OneDNNContext>(
-            dev_ctx, phi::IntArray(common::vectorize(dy->dims())), 0, dy);
+        Full<T, OneDNNContext>(dev_ctx, dy->dims(), 0, dy);
       }
     }
     return;
3 changes: 1 addition & 2 deletions paddle/phi/kernels/onednn/expand_grad_kernel.cc
@@ -29,8 +29,7 @@ void ExpandGradKernel(const Context& dev_ctx,
 
   if ((in_grad && in_grad->numel() == 0) || out_grad.numel() == 0) {
     dev_ctx.template Alloc<T>(in_grad);
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(in_grad->dims())), 0, in_grad);
+    Full<T, Context>(dev_ctx, in_grad->dims(), 0, in_grad);
     return;
   }
 
6 changes: 1 addition & 5 deletions paddle/phi/kernels/onednn/prelu_grad_kernel.cc
@@ -31,11 +31,7 @@ void PReluGradKernel(const Context& dev_ctx,
   if (x_grad->numel() == 0) {
     dev_ctx.template Alloc<T>(x_grad);
     if (alpha_grad) {
-      phi::Full<T, Context>(
-          dev_ctx,
-          phi::IntArray(common::vectorize(alpha_grad->dims())),
-          0,
-          alpha_grad);
+      Full<T, Context>(dev_ctx, alpha_grad->dims(), 0, alpha_grad);
     }
   }
   bool is_test = dev_ctx.HasDnnAttr("is_test")
3 changes: 1 addition & 2 deletions paddle/phi/kernels/onednn/reduce_mean_kernel.cc
@@ -26,8 +26,7 @@ void MeanRawKernel(const Context& dev_ctx,
                    bool reduce_all,
                    DenseTensor* out) {
   if (x.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(out->dims())), NAN, out);
+    Full<T, Context>(dev_ctx, out->dims(), NAN, out);
     return;
   }
 
3 changes: 1 addition & 2 deletions paddle/phi/kernels/onednn/reshape_grad_kernel.cc
@@ -21,8 +21,7 @@ void ReshapeGradKernel(const Context& dev_ctx,
                        const DenseTensor& out_grad,
                        DenseTensor* x_grad) {
   if ((x_grad && x_grad->numel() == 0) || out_grad.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(x_grad->dims())), 0, x_grad);
+    Full<T, Context>(dev_ctx, x_grad->dims(), 0, x_grad);
     return;
   }
 