Commit 7cece4e

optimize phi::IntArray(common::vectorize()) in kernels/gpudnn (#77195)

1 parent 3e08d67 commit 7cece4e

12 files changed: 18 additions, 46 deletions
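Every hunk below applies the same one-line simplification: instead of flattening a tensor's DDim into a std::vector via common::vectorize() and wrapping the result in a phi::IntArray, the dims are passed to Full directly, dropping an intermediate vector at each call site. The toy C++ sketch below illustrates the mechanism with stand-in types; DDim, IntArray, vectorize, and Full here are simplified placeholders, and the IntArray(const DDim&) converting constructor in particular is an assumption about why the shorter call compiles, not something this diff itself shows.

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for common::DDim: just holds the dimension sizes.
struct DDim {
  std::vector<int64_t> d;
};

// Stand-in for common::vectorize(): copies a DDim out into a std::vector.
std::vector<int64_t> vectorize(const DDim& dims) { return dims.d; }

// Stand-in for phi::IntArray. The DDim overload is the hypothetical piece
// that would let callers write Full(dev_ctx, dims, ...) directly.
class IntArray {
 public:
  IntArray(const std::vector<int64_t>& v) : data_(v) {}  // old call path
  IntArray(const DDim& dims) : data_(dims.d) {}          // direct call path
  const std::vector<int64_t>& GetData() const { return data_; }

 private:
  std::vector<int64_t> data_;
};

// Stand-in for phi::Full: fills `out` with `value`, sized from `shape`.
void Full(const IntArray& shape, int value, std::vector<int>* out) {
  int64_t n = 1;
  for (int64_t d : shape.GetData()) n *= d;
  out->assign(static_cast<size_t>(n), value);
}

int main() {
  DDim dims{{2, 3}};
  std::vector<int> a, b;
  Full(IntArray(vectorize(dims)), 0, &a);  // pattern before this commit
  Full(dims, 0, &b);                       // pattern after this commit
  std::cout << (a == b) << "\n";           // prints 1: identical results
}

With a converting constructor like this, the dims argument binds to the const IntArray& parameter through a single implicit conversion, so each call site shrinks from four lines to one with no behavior change.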

paddle/phi/kernels/gpudnn/affine_grid_grad_kernel.cu

Lines changed: 1 addition & 4 deletions
@@ -42,10 +42,7 @@ void AffineGridGradCudnnKernel(const Context& dev_ctx,
           "Only support for CUDAPlace.Please switch your context from "
           "CPUPlace to CUDAPlace or update your cudnn."));
   if (output_grad.numel() == 0 || input_grad->numel() == 0) {
-    phi::Full<T, Context>(dev_ctx,
-                          phi::IntArray(common::vectorize(input_grad->dims())),
-                          0,
-                          input_grad);
+    Full<T, Context>(dev_ctx, input_grad->dims(), 0, input_grad);
     return;
   }
   auto handle = dev_ctx.cudnn_handle();

paddle/phi/kernels/gpudnn/affine_grid_kernel.cu

Lines changed: 1 addition & 2 deletions
@@ -53,8 +53,7 @@ void AffineGridCudnnKernel(const Context& dev_ctx,
   output->Resize(common::make_ddim({n, h_size_data[2], h_size_data[3], 2}));
   T* output_data = dev_ctx.template Alloc<T>(output);
   if (input.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(output->dims())), 0, output);
+    Full<T, Context>(dev_ctx, output->dims(), 0, output);
     return;
   }
   ScopedSpatialTransformerDescriptor st_desc;

paddle/phi/kernels/gpudnn/conv_grad_kernel.cu

Lines changed: 1 addition & 5 deletions
@@ -415,11 +415,7 @@ void ConvCudnnGradKernel(const Context& dev_ctx,
   if (input.numel() == 0 || filter.numel() == 0) {
     if (input_grad) dev_ctx.template Alloc<T>(input_grad);
     if (filter_grad) {
-      phi::Full<T, Context>(
-          dev_ctx,
-          phi::IntArray(common::vectorize(filter_grad->dims())),
-          0,
-          filter_grad);
+      Full<T, Context>(dev_ctx, filter_grad->dims(), 0, filter_grad);
     }
     return;
   }

paddle/phi/kernels/gpudnn/conv_kernel.cu

Lines changed: 1 addition & 2 deletions
@@ -306,8 +306,7 @@ void ConvCudnnKernel(const Context& dev_ctx,
                      const std::string& data_format,
                      DenseTensor* output) {
   if (input.numel() == 0 || filter.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(output->dims())), 0, output);
+    Full<T, Context>(dev_ctx, output->dims(), 0, output);
     return;
   }
   dev_ctx.template Alloc<T>(output);

paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu

Lines changed: 2 additions & 6 deletions
@@ -61,18 +61,14 @@ void ConvTransposeGradRawGPUDNNKernel(const Context& dev_ctx,
   if (x.numel() == 0) {
     if (dx) dev_ctx.template Alloc<T>(dx);
     if (dfilter) {
-      phi::Full<T, Context>(dev_ctx,
-                            phi::IntArray(common::vectorize(dfilter->dims())),
-                            0,
-                            dfilter);
+      Full<T, Context>(dev_ctx, dfilter->dims(), 0, dfilter);
     }
     return;
   }
   if (filter.numel() == 0) {
     if (dfilter) dev_ctx.template Alloc<T>(dfilter);
     if (dx) {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
+      Full<T, Context>(dev_ctx, dx->dims(), 0, dx);
     }
     return;
   }

paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu

Lines changed: 1 addition & 2 deletions
@@ -251,8 +251,7 @@ void ConvTransposeRawGPUDNNKernel(const Context& dev_ctx,
                                   const std::string& data_format,
                                   DenseTensor* out) {
   if (x.numel() == 0 || filter.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
+    Full<T, Context>(dev_ctx, out->dims(), 0, out);
     return;
   }

paddle/phi/kernels/gpudnn/pool_kernel.cu

Lines changed: 5 additions & 10 deletions
@@ -38,8 +38,7 @@ void PoolRawGPUDNNKernel(const Context& dev_ctx,
                          const std::string& padding_algorithm,
                          DenseTensor* out) {
   if (x.numel() == 0) {
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(out->dims())), NAN, out);
+    Full<T, Context>(dev_ctx, out->dims(), NAN, out);
     return;
   }
   PADDLE_ENFORCE_EQ(
@@ -268,11 +267,9 @@ void Pool2dGPUDNNKernel(const Context& dev_ctx,
                         DenseTensor* out) {
   if (x.numel() == 0) {
     if (pooling_type == "max") {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
+      Full<T, Context>(dev_ctx, out->dims(), 0, out);
     } else {  // for pooling_type == "avg"
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), NAN, out);
+      Full<T, Context>(dev_ctx, out->dims(), NAN, out);
     }
     return;
   }
@@ -306,11 +303,9 @@ void Pool3dGPUDNNKernel(const Context& dev_ctx,
                         DenseTensor* out) {
   if (x.numel() == 0) {
     if (pooling_type == "max" || (!adaptive && pooling_type == "avg")) {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
+      Full<T, Context>(dev_ctx, out->dims(), 0, out);
     } else {
-      phi::Full<T, Context>(
-          dev_ctx, phi::IntArray(common::vectorize(out->dims())), NAN, out);
+      Full<T, Context>(dev_ctx, out->dims(), NAN, out);
     }
     return;
   }

paddle/phi/kernels/onednn/elementwise_grad_kernel.cc

Lines changed: 2 additions & 4 deletions
@@ -126,15 +126,13 @@ void ElementwiseGradKernel(const OneDNNContext& dev_ctx,
   if (dx) {
     dev_ctx.template Alloc<T>(dx);
     if (dx->numel() != 0) {
-      phi::Full<T, OneDNNContext>(
-          dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
+      Full<T, OneDNNContext>(dev_ctx, dx->dims(), 0, dx);
     }
   }
   if (dy) {
     dev_ctx.template Alloc<T>(dy);
     if (dy->numel() != 0) {
-      phi::Full<T, OneDNNContext>(
-          dev_ctx, phi::IntArray(common::vectorize(dy->dims())), 0, dy);
+      Full<T, OneDNNContext>(dev_ctx, dy->dims(), 0, dy);
     }
   }
   return;

paddle/phi/kernels/onednn/expand_grad_kernel.cc

Lines changed: 1 addition & 2 deletions
@@ -29,8 +29,7 @@ void ExpandGradKernel(const Context& dev_ctx,
 
   if ((in_grad && in_grad->numel() == 0) || out_grad.numel() == 0) {
     dev_ctx.template Alloc<T>(in_grad);
-    phi::Full<T, Context>(
-        dev_ctx, phi::IntArray(common::vectorize(in_grad->dims())), 0, in_grad);
+    Full<T, Context>(dev_ctx, in_grad->dims(), 0, in_grad);
     return;
   }

paddle/phi/kernels/onednn/prelu_grad_kernel.cc

Lines changed: 1 addition & 5 deletions
@@ -31,11 +31,7 @@ void PReluGradKernel(const Context& dev_ctx,
   if (x_grad->numel() == 0) {
     dev_ctx.template Alloc<T>(x_grad);
     if (alpha_grad) {
-      phi::Full<T, Context>(
-          dev_ctx,
-          phi::IntArray(common::vectorize(alpha_grad->dims())),
-          0,
-          alpha_grad);
+      Full<T, Context>(dev_ctx, alpha_grad->dims(), 0, alpha_grad);
     }
   }
   bool is_test = dev_ctx.HasDnnAttr("is_test")
