diff --git a/paddle/phi/kernels/impl/accuracy_check_kernel_impl.h b/paddle/phi/kernels/impl/accuracy_check_kernel_impl.h
index d7431495408a3e..41efa2d8803fd5 100644
--- a/paddle/phi/kernels/impl/accuracy_check_kernel_impl.h
+++ b/paddle/phi/kernels/impl/accuracy_check_kernel_impl.h
@@ -251,7 +251,7 @@ struct AccuracyCheckFunctor {
         in_data, other_data, rtol, atol, equal_nan, num, out_data);
 
     DenseTensor out_cpu;
-    Copy(dev_ctx, *output, phi::CPUPlace(), true, &out_cpu);
+    Copy(dev_ctx, *output, CPUPlace(), true, &out_cpu);
 
     auto data_ptr = out_cpu.data<bool>();
     PADDLE_ENFORCE_EQ(*data_ptr,
diff --git a/paddle/phi/kernels/impl/average_accumulates_kernel_impl.h b/paddle/phi/kernels/impl/average_accumulates_kernel_impl.h
index d06b5a7eabcfa4..21e8c6039d1385 100644
--- a/paddle/phi/kernels/impl/average_accumulates_kernel_impl.h
+++ b/paddle/phi/kernels/impl/average_accumulates_kernel_impl.h
@@ -48,18 +48,17 @@ void AverageAccumulatesKernel(const Context& dev_ctx,
   // int64_t num_accumulates = 0;
   // int64_t old_num_accumulates = 0;
 
-  auto num_updates_cpu =
-      phi::memory_utils::Alloc(phi::CPUPlace(), sizeof(int64_t));
+  auto num_updates_cpu = phi::memory_utils::Alloc(CPUPlace(), sizeof(int64_t));
   int64_t* num_updates_cpu_ptr =
       reinterpret_cast<int64_t*>(num_updates_cpu->ptr());
 
   auto num_accumulates_cpu =
-      phi::memory_utils::Alloc(phi::CPUPlace(), sizeof(int64_t));
+      phi::memory_utils::Alloc(CPUPlace(), sizeof(int64_t));
   int64_t* num_accumulates_cpu_ptr =
       reinterpret_cast<int64_t*>(num_accumulates_cpu->ptr());
 
   auto old_num_accumulates_cpu =
-      phi::memory_utils::Alloc(phi::CPUPlace(), sizeof(int64_t));
+      phi::memory_utils::Alloc(CPUPlace(), sizeof(int64_t));
   int64_t* old_num_accumulates_cpu_ptr =
       reinterpret_cast<int64_t*>(old_num_accumulates_cpu->ptr());
 
diff --git a/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h b/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h
index 90a8b578d6968b..a9f2bacce997c1 100644
--- a/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h
+++ b/paddle/phi/kernels/impl/beam_search_decode_kernel_impl.h
@@ -49,7 +49,7 @@ struct BeamSearchDecodeFunctor {
       if (tensor_on_gpu_) {
        dev_ctx->Wait();
      }
-      Copy(*dev_ctx, step_id, phi::CPUPlace(), false, &out);
+      Copy(*dev_ctx, step_id, CPUPlace(), false, &out);
      dev_ctx->Wait();
    }
 
@@ -72,7 +72,7 @@ struct BeamSearchDecodeFunctor {
      if (tensor_on_gpu_) {
        dev_ctx->Wait();
      }
-      Copy(*dev_ctx, step_score, phi::CPUPlace(), false, &out);
+      Copy(*dev_ctx, step_score, CPUPlace(), false, &out);
      dev_ctx->Wait();
    }
 
diff --git a/paddle/phi/kernels/impl/clip_by_norm_kernel_impl.h b/paddle/phi/kernels/impl/clip_by_norm_kernel_impl.h
index 5da23147d004b4..8774234368617b 100644
--- a/paddle/phi/kernels/impl/clip_by_norm_kernel_impl.h
+++ b/paddle/phi/kernels/impl/clip_by_norm_kernel_impl.h
@@ -45,7 +45,7 @@ void ClipByNormFunctor(const Context& dev_ctx,
       temp + (static_cast<T>(1) - temp) * max_norm / (x_norm + epsilon);
   Eigen::array<int, 1> one_dim{{1}};
   Eigen::DSizes<int, 1> m_dsize(input->numel());
-  if (dev_ctx.GetPlace() == phi::CPUPlace()) {
+  if (dev_ctx.GetPlace() == CPUPlace()) {
     out.device(*place) = x * scaling.reshape(one_dim).eval().broadcast(m_dsize);
   } else {
     out.device(*place) = x * scaling.reshape(one_dim).broadcast(m_dsize);
diff --git a/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h b/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
index 5566d520366b63..c20186deee3a4d 100644
--- a/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
@@ -92,7 +92,7 @@ inline bool CheckMatrixInvertible(const Context& dev_ctx,
 
   // copy to host
   DenseTensor cpu_tensor;
-  Copy(dev_ctx, dev_tensor, phi::CPUPlace(), false, &cpu_tensor);
+  Copy(dev_ctx, dev_tensor, CPUPlace(), false, &cpu_tensor);
 
   // if founded zero, the matrix is not invertible
   // else the matrix is invertible
diff --git a/paddle/phi/kernels/impl/fetch_impl.h b/paddle/phi/kernels/impl/fetch_impl.h
index b4acadcd019506..122f0329e0d3ac 100644
--- a/paddle/phi/kernels/impl/fetch_impl.h
+++ b/paddle/phi/kernels/impl/fetch_impl.h
@@ -26,7 +26,7 @@ void FetchKernel(const Context& dev_ctx,
   if (!x.IsInitialized()) {
     return;
   }
-  Copy(dev_ctx, x, phi::CPUPlace(), true, out);
+  Copy(dev_ctx, x, CPUPlace(), true, out);
 }
 
 template <typename T, typename Context>
@@ -35,7 +35,7 @@ void FetchArrayKernel(const Context& dev_ctx,
                       TensorArray* out) {
   out->resize(x.size());
   for (size_t i = 0; i < x.size(); ++i) {
-    Copy(dev_ctx, x[i], phi::CPUPlace(), true, &(out->at(i)));
+    Copy(dev_ctx, x[i], CPUPlace(), true, &(out->at(i)));
   }
 }
 
diff --git a/paddle/phi/kernels/impl/gru_unit_kernel_impl.h b/paddle/phi/kernels/impl/gru_unit_kernel_impl.h
index 6990989b3c329a..b57e2ead4f5958 100644
--- a/paddle/phi/kernels/impl/gru_unit_kernel_impl.h
+++ b/paddle/phi/kernels/impl/gru_unit_kernel_impl.h
@@ -35,7 +35,7 @@ void ActCompute(
   } else if (act_type == tanh) {
     funcs::TanhFunctor<T>()(d, x, y);
   } else if (act_type == relu) {
-    if (place == phi::CPUPlace())
+    if (place == CPUPlace())
       funcs::ReluCPUFunctor<T>()(d, x, y);
     else
       funcs::ReluCUDAFunctor<T>()(d, x, y);
diff --git a/paddle/phi/kernels/impl/im2sequence_kernel_impl.h b/paddle/phi/kernels/impl/im2sequence_kernel_impl.h
index 536612a16f91f7..516c613c4725ba 100644
--- a/paddle/phi/kernels/impl/im2sequence_kernel_impl.h
+++ b/paddle/phi/kernels/impl/im2sequence_kernel_impl.h
@@ -50,7 +50,7 @@ void Im2SequenceKernel(const Context& dev_ctx,
 
     const DenseTensor* img_real_size = y.get_ptr();
     DenseTensor cpu_shape_tensor;
-    Copy(dev_ctx, *img_real_size, phi::CPUPlace(), true, &cpu_shape_tensor);
+    Copy(dev_ctx, *img_real_size, CPUPlace(), true, &cpu_shape_tensor);
     std::vector<int> img_real_h;
     std::vector<int> img_real_w;
     std::vector<int> output_height;
diff --git a/paddle/phi/kernels/impl/isclose_kernel_impl.h b/paddle/phi/kernels/impl/isclose_kernel_impl.h
index 667945acf6c418..29b1b3a3d3011c 100644
--- a/paddle/phi/kernels/impl/isclose_kernel_impl.h
+++ b/paddle/phi/kernels/impl/isclose_kernel_impl.h
@@ -267,7 +267,7 @@ struct GetTensorValue<phi::GPUContext, T> {
     T value;
     const auto gpu_place = dev_ctx.GetPlace();
     memory_utils::Copy(
-        phi::CPUPlace(), &value, gpu_place, data, sizeof(T), dev_ctx.stream());
+        CPUPlace(), &value, gpu_place, data, sizeof(T), dev_ctx.stream());
     return value;
   }
 };
diff --git a/paddle/phi/kernels/impl/lamb_kernel_impl.h b/paddle/phi/kernels/impl/lamb_kernel_impl.h
index a19d4969e4bbda..5e0c0554714205 100644
--- a/paddle/phi/kernels/impl/lamb_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lamb_kernel_impl.h
@@ -186,8 +186,7 @@ void ComputeImpl(const Context& dev_ctx,
 
   // Diff from here
   if (dev_ctx.GetPlace().GetType() == AllocationType::GPU &&
-      beta1_pow.place() == phi::CPUPlace() &&
-      beta2_pow.place() == phi::CPUPlace()) {
+      beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
     LambMomentREGUpdateFunctor moment_update_functor(
         weight_decay,
         beta1,
diff --git a/paddle/phi/kernels/impl/lod_reset_kernel_impl.h b/paddle/phi/kernels/impl/lod_reset_kernel_impl.h
index 48dd49ba581484..90259cbd423d00 100644
--- a/paddle/phi/kernels/impl/lod_reset_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lod_reset_kernel_impl.h
@@ -56,7 +56,7 @@ void LodResetKernel(const Context& dev_ctx,
     auto* lod = lod_t->data<int>();
     DenseTensor lod_cpu;
     if (lod_t->place().GetType() == phi::AllocationType::GPU) {
-      Copy(dev_ctx, *lod_t, phi::CPUPlace(), true, &lod_cpu);
+      Copy(dev_ctx, *lod_t, CPUPlace(), true, &lod_cpu);
       lod = lod_cpu.data<int>();
     }
     level0 = std::vector<int>(lod, lod + lod_t->numel());
diff --git a/paddle/phi/kernels/impl/lstsq_kernel_impl.h b/paddle/phi/kernels/impl/lstsq_kernel_impl.h
index ba334ca2d48589..725143a8b2f2e7 100644
--- a/paddle/phi/kernels/impl/lstsq_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lstsq_kernel_impl.h
@@ -233,7 +233,7 @@ inline void BatchedOrmqr(const GPUContext& dev_ctx,
 
     // check the error info
     int info_h;
-    memory_utils::Copy(phi::CPUPlace(),
+    memory_utils::Copy(CPUPlace(),
                        &info_h,
                        dev_ctx.GetPlace(),
                        info_d,
@@ -302,7 +302,7 @@ inline void BatchedOrmqr(const GPUContext& dev_ctx,
 
     // check the error info
     int info_h;
-    memory_utils::Copy(phi::CPUPlace(),
+    memory_utils::Copy(CPUPlace(),
                        &info_h,
                        dev_ctx.GetPlace(),
                        info_d,
diff --git a/paddle/phi/kernels/impl/lu_kernel_impl.h b/paddle/phi/kernels/impl/lu_kernel_impl.h
index 38eb9bd2c02ec2..38d54f1615ccf6 100644
--- a/paddle/phi/kernels/impl/lu_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lu_kernel_impl.h
@@ -478,7 +478,7 @@ void Unpack_Pivot(const Context& dev_ctx,
   auto prank = Pdimvec.size();
   auto Pnum = dims[prank - 1];
   DenseTensor Pivot_cpu;
-  phi::CPUPlace cpu;
+  CPUPlace cpu;
   Copy(dev_ctx, Pivot, cpu, false, &Pivot_cpu);
   auto pdataptr = Pivot_cpu.data<int32_t>();
   Pdimvec[prank - 1] = h;
diff --git a/paddle/phi/kernels/impl/numel_kernel_impl.h b/paddle/phi/kernels/impl/numel_kernel_impl.h
index 3154614443ecf1..fecead18cae0a4 100644
--- a/paddle/phi/kernels/impl/numel_kernel_impl.h
+++ b/paddle/phi/kernels/impl/numel_kernel_impl.h
@@ -25,7 +25,7 @@ void NumelKernel(const Context& dev_ctx,
   auto place = dev_ctx.GetPlace();
   auto out_data = dev_ctx.template Alloc<int64_t>(out);
 
-  if (place == phi::CPUPlace()) {
+  if (place == CPUPlace()) {
     out_data[0] = input.numel();
   } else {
     DenseTensor cpu_tensor;
diff --git a/paddle/phi/kernels/impl/sequence_mask_kernel_impl.h b/paddle/phi/kernels/impl/sequence_mask_kernel_impl.h
index 78b65b26f2f935..c7f5a35ae97da1 100644
--- a/paddle/phi/kernels/impl/sequence_mask_kernel_impl.h
+++ b/paddle/phi/kernels/impl/sequence_mask_kernel_impl.h
@@ -78,7 +78,7 @@ void SequenceMaskKernel(const Context& dev_ctx,
     bool is_gpu_place = dev_ctx.GetPlace().GetType() == AllocationType::GPU;
     if (is_gpu_place) {
       DenseTensor temp;
-      Copy(dev_ctx, *max_len_tensor.get_ptr(), phi::CPUPlace(), false, &temp);
+      Copy(dev_ctx, *max_len_tensor.get_ptr(), CPUPlace(), false, &temp);
       maxlen = *temp.data<int>();
     } else {
       maxlen = *max_len_tensor.get_ptr()->data<int>();
diff --git a/paddle/phi/kernels/impl/sequence_pool_kernel_impl.h b/paddle/phi/kernels/impl/sequence_pool_kernel_impl.h
index 4e0fac5cf6740a..3618c648f3c3df 100644
--- a/paddle/phi/kernels/impl/sequence_pool_kernel_impl.h
+++ b/paddle/phi/kernels/impl/sequence_pool_kernel_impl.h
@@ -65,7 +65,7 @@ void SequencePoolKernel(const Context& dev_ctx,
 
   // Do not create index buffer for inference mode
   if (pooltype == "MAX" &&
-      (is_test == false || (dev_ctx.GetPlace() == phi::CPUPlace()) == false)) {
+      (is_test == false || (dev_ctx.GetPlace() == CPUPlace()) == false)) {
     index = max_index;
     index->Resize({dims});
     dev_ctx.template Alloc<int>(index);
diff --git a/paddle/phi/kernels/impl/warpctc_kernel_impl.h b/paddle/phi/kernels/impl/warpctc_kernel_impl.h
index 920753c6c5c806..58828d36dd5073 100644
--- a/paddle/phi/kernels/impl/warpctc_kernel_impl.h
+++ b/paddle/phi/kernels/impl/warpctc_kernel_impl.h
@@ -276,8 +276,8 @@ void WarpctcKernel(const Context& dev_ctx,
 
     DenseTensor logits_length_cpu;
     DenseTensor labels_length_cpu;
-    Copy(dev_ctx, *logits_length, phi::CPUPlace(), false, &logits_length_cpu);
-    Copy(dev_ctx, *labels_length, phi::CPUPlace(), false, &labels_length_cpu);
+    Copy(dev_ctx, *logits_length, CPUPlace(), false, &logits_length_cpu);
+    Copy(dev_ctx, *labels_length, CPUPlace(), false, &labels_length_cpu);
 
     logits_lod.push_back(0);
     label_lod.push_back(0);
@@ -358,7 +358,7 @@ void WarpctcKernel(const Context& dev_ctx,
   T* pad_value_data = dev_ctx.template HostAlloc<T>(&cpu_pad_value);
   *pad_value_data = static_cast<T>(0);
   DenseTensor pad_value;
-  if (dev_ctx.GetPlace() == phi::CPUPlace()) {
+  if (dev_ctx.GetPlace() == CPUPlace()) {
     pad_value = cpu_pad_value;
   } else {
     Copy(dev_ctx, cpu_pad_value, dev_ctx.GetPlace(), true, &pad_value);
@@ -401,7 +401,7 @@ void WarpctcKernel(const Context& dev_ctx,
     lod.push_back(label_lod);
     warpctc_label.set_lod(lod);
 
-    if (dev_ctx.GetPlace() == phi::CPUPlace()) {
+    if (dev_ctx.GetPlace() == CPUPlace()) {
       funcs::UnpaddingDenseTensorFunctor<Context, int>()(
           dev_ctx,
          label,
@@ -424,10 +424,10 @@ void WarpctcKernel(const Context& dev_ctx,
           0 /*lod_level*/,
           false /*norm_by_times*/,
           funcs::kBatchLengthWidth);
-      Copy(dev_ctx, gpu_label, phi::CPUPlace(), true, &warpctc_label);
+      Copy(dev_ctx, gpu_label, CPUPlace(), true, &warpctc_label);
     }
   } else {
-    Copy(dev_ctx, label, phi::CPUPlace(), true, &warpctc_label);
+    Copy(dev_ctx, label, CPUPlace(), true, &warpctc_label);
   }
 
   const int* warpctc_label_data = warpctc_label.data<int>();
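
Editorial note, not part of the patch: every hunk above makes the same mechanical change, dropping the explicit phi:: qualifier from CPUPlace. This compiles unchanged because all of these *_impl.h headers live inside namespace phi, so the unqualified name CPUPlace still resolves to phi::CPUPlace through ordinary enclosing-namespace lookup. A minimal, self-contained sketch of that rule follows; the demo namespace and its members are hypothetical stand-ins, not Paddle code.

// Sketch of the name-lookup rule the patch relies on. "demo" stands in
// for "phi"; CPUPlace here is a simplified stand-in for phi::CPUPlace.
#include <iostream>

namespace demo {

struct CPUPlace {
  const char* DebugString() const { return "Place(cpu)"; }
};

// Inside the enclosing namespace, the unqualified name is found by
// enclosing-scope lookup, so CPUPlace() and demo::CPUPlace() are the
// same type here -- which is why the hunks above can drop "phi::".
inline void PrintHostPlace() {
  CPUPlace here;  // equivalent to demo::CPUPlace here;
  std::cout << here.DebugString() << "\n";
}

}  // namespace demo

int main() {
  demo::PrintHostPlace();  // prints: Place(cpu)
  return 0;
}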
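
A related observation, also editorial: the touched call sites use two synchronization styles for device-to-host copies. Kernels such as FetchKernel pass true (a blocking copy) and read the host buffer immediately, while BeamSearchDecodeFunctor passes false (non-blocking) and brackets the copy with explicit dev_ctx->Wait() calls. A toy model of why both orderings are safe is sketched below; AsyncCopy is a hypothetical helper, not a Paddle API.

// Toy model of the two device-to-host copy styles seen in the diff.
#include <cassert>
#include <future>
#include <vector>

// Hypothetical stand-in for an asynchronous device-to-host copy.
std::future<void> AsyncCopy(const std::vector<int>& src,
                            std::vector<int>* dst) {
  // Capture the source by pointer so the task stays valid after return.
  const std::vector<int>* src_ptr = &src;
  return std::async(std::launch::async, [src_ptr, dst] { *dst = *src_ptr; });
}

int main() {
  const std::vector<int> device_data{1, 2, 3};
  std::vector<int> host_data;

  // Pattern A: blocking copy -- mirrors Copy(dev_ctx, x, CPUPlace(), true,
  // &out) followed by an immediate dereference of the host buffer.
  AsyncCopy(device_data, &host_data).wait();
  assert(host_data[0] == 1);

  // Pattern B: non-blocking copy -- mirrors Copy(..., false, &out) plus the
  // explicit dev_ctx->Wait() before the host data is touched.
  std::future<void> pending = AsyncCopy(device_data, &host_data);
  pending.wait();  // the dev_ctx->Wait() analogue
  assert(host_data[2] == 3);

  return 0;
}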