Skip to content

Commit 2e3b5ca

Browse files
committed
optimize phi::CPUPlace in kernels/cpu/
1 parent ea557ba commit 2e3b5ca

File tree

8 files changed

+15
-18
lines changed

8 files changed

+15
-18
lines changed

paddle/phi/kernels/cpu/adam_kernel.cc

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -140,8 +140,7 @@ PADDLE_API void AdamDenseKernel(
140140
const T* grad_ptr = grad.data<T>();
141141

142142
auto adam =
143-
phi::jit::KernelFuncs<phi::jit::AdamTuple<T>, phi::CPUPlace>::Cache().At(
144-
attr);
143+
phi::jit::KernelFuncs<phi::jit::AdamTuple<T>, CPUPlace>::Cache().At(attr);
145144

146145
static constexpr int64_t chunk_size = 512;
147146

paddle/phi/kernels/cpu/adamw_kernel.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ PADDLE_API void AdamwDenseKernel(
153153
const T* grad_ptr = grad.data<T>();
154154

155155
auto adamw =
156-
phi::jit::KernelFuncs<phi::jit::AdamWTuple<T>, phi::CPUPlace>::Cache().At(
156+
phi::jit::KernelFuncs<phi::jit::AdamWTuple<T>, CPUPlace>::Cache().At(
157157
attr);
158158

159159
static constexpr int64_t chunk_size = 512;

paddle/phi/kernels/cpu/crf_decoding_kernel.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,9 +41,9 @@ void Decode(const Context& dev_ctx,
4141
DenseTensor track;
4242
track.Resize(emission_dims);
4343
int* track_value = dev_ctx.template Alloc<int>(&track);
44-
auto ker = phi::jit::KernelFuncs<phi::jit::CRFDecodingTuple<T>,
45-
phi::CPUPlace>::Cache()
46-
.At(tag_num);
44+
auto ker =
45+
phi::jit::KernelFuncs<phi::jit::CRFDecodingTuple<T>, CPUPlace>::Cache()
46+
.At(tag_num);
4747
ker(static_cast<int>(seq_len), x, w, alpha_value, track_value, tag_num);
4848
T max_score = -std::numeric_limits<T>::max();
4949
int max_i = 0;

paddle/phi/kernels/cpu/eig.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,7 @@ void MagmaEig(const Context& dev_ctx,
249249
// magma will modify original input, so copy to cpu at any case
250250
DenseTensor input_copy_cpu;
251251
input_copy_cpu.Resize(input.dims());
252-
Copy(dev_ctx, input, phi::CPUPlace(), false, &input_copy_cpu);
252+
Copy(dev_ctx, input, CPUPlace(), false, &input_copy_cpu);
253253

254254
using RealT = typename phi::dtype::Real<T>;
255255
magma_vec_t jobvr = MagmaVec;
@@ -274,7 +274,7 @@ void MagmaEig(const Context& dev_ctx,
274274
phi::dtype::Real<T>* rwork_data = nullptr;
275275

276276
rwork.Resize(common::make_ddim({lda * 2}));
277-
auto cpu_place = phi::CPUPlace();
277+
auto cpu_place = CPUPlace();
278278
phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
279279
auto* cpu_ctx = static_cast<phi::CPUContext*>(pool.Get(cpu_place));
280280
rwork_data = (*cpu_ctx).template Alloc<phi::dtype::Real<T>>(&rwork);
@@ -345,7 +345,7 @@ void ApplyEigKernelMagma(const Context& dev_ctx,
345345

346346
DenseTensor vectors_row_major_cpu;
347347
vectors_row_major_cpu.Resize(input.dims());
348-
auto cpu_place = phi::CPUPlace();
348+
auto cpu_place = CPUPlace();
349349
phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
350350
auto* cpu_ctx = static_cast<phi::CPUContext*>(pool.Get(cpu_place));
351351
(*cpu_ctx).template Alloc<T>(&vectors_row_major_cpu);

paddle/phi/kernels/cpu/fusion_seqpool_concat_kernel.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,8 @@ void FusionSeqPoolConcatKernel(const Context& dev_ctx,
5555
attr.type = phi::jit::SeqPoolType::kSqrt;
5656
}
5757
auto seqpool =
58-
phi::jit::KernelFuncs<phi::jit::SeqPoolTuple<T>, phi::CPUPlace>::Cache()
59-
.At(attr);
58+
phi::jit::KernelFuncs<phi::jit::SeqPoolTuple<T>, CPUPlace>::Cache().At(
59+
attr);
6060
size_t n = ins.size();
6161
size_t dst_step_size = n * w;
6262
for (size_t i = 0; i < n; ++i) {

paddle/phi/kernels/cpu/index_select_impl.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ void IndexSelectInner(const Context& dev_ctx,
6565

6666
DenseTensor index_cpu_copy;
6767
if (index.place().GetType() != AllocationType::CPU) {
68-
Copy(dev_ctx, index, phi::CPUPlace(), true, &index_cpu_copy);
68+
Copy(dev_ctx, index, CPUPlace(), true, &index_cpu_copy);
6969
}
7070
const IndexT* index_data = index.place().GetType() == AllocationType::CPU
7171
? index.data<IndexT>()

paddle/phi/kernels/cpu/layer_norm_kernel.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -133,8 +133,8 @@ void LayerNormKernel(const Context& dev_ctx,
133133
}
134134

135135
auto ker =
136-
phi::jit::KernelFuncs<phi::jit::LayerNormTuple<T>, phi::CPUPlace>::Cache()
137-
.At(right);
136+
phi::jit::KernelFuncs<phi::jit::LayerNormTuple<T>, CPUPlace>::Cache().At(
137+
right);
138138
ker(x_tmp.data<T>(),
139139
out.data<T>(),
140140
mean_tmp.data<T>(),

paddle/phi/kernels/cpu/sgd_kernel.cc

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,7 @@ void sgd_dense_param_dense_grad_impl(const DenseTensor& param,
3535
T* out_data = param_out->data<T>();
3636

3737
auto sgd =
38-
phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, phi::CPUPlace>::Cache().At(
39-
attr);
38+
phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, CPUPlace>::Cache().At(attr);
4039
sgd(lr, param_data, grad_data, &rows_idx, out_data, &attr);
4140
}
4241

@@ -76,8 +75,7 @@ void sgd_dense_param_sparse_grad_impl(const DenseTensor& param,
7675
attr.selected_rows_size = static_cast<int>(grad_rows.size());
7776

7877
auto sgd =
79-
phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, phi::CPUPlace>::Cache().At(
80-
attr);
78+
phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, CPUPlace>::Cache().At(attr);
8179
sgd(lr, param_data, grad_data, rows_data, out_data, &attr);
8280
}
8381

Comments (0)