3 changes: 1 addition & 2 deletions in paddle/phi/kernels/cpu/adam_kernel.cc

@@ -140,8 +140,7 @@ PADDLE_API void AdamDenseKernel(
   const T* grad_ptr = grad.data<T>();
 
   auto adam =
-      phi::jit::KernelFuncs<phi::jit::AdamTuple<T>, phi::CPUPlace>::Cache().At(
-          attr);
+      phi::jit::KernelFuncs<phi::jit::AdamTuple<T>, CPUPlace>::Cache().At(attr);
 
   static constexpr int64_t chunk_size = 512;
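
Every hunk in this diff makes the same substantive change: these sources already sit inside namespace phi, so the phi:: qualifier on CPUPlace is redundant and unqualified lookup resolves to the same type. With the shorter name, clang-format can also fold several Cache().At(...) calls onto fewer lines, which is where the reflowed hunks below come from. A minimal standalone sketch (illustrative code, not Paddle's) of why the two spellings are interchangeable:

// why_unqualified_lookup_works.cc: standalone illustration, not Paddle code.
#include <iostream>
#include <type_traits>

namespace phi {

struct CPUPlace {};

void Run() {
  // Inside `namespace phi`, unqualified lookup of `CPUPlace` finds
  // `phi::CPUPlace`, so both spellings name exactly the same type.
  static_assert(std::is_same<CPUPlace, phi::CPUPlace>::value,
                "the qualifier is redundant here");
  std::cout << "CPUPlace and phi::CPUPlace are the same type\n";
}

}  // namespace phi

int main() { phi::Run(); }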
2 changes: 1 addition & 1 deletion in paddle/phi/kernels/cpu/adamw_kernel.cc

@@ -153,7 +153,7 @@ PADDLE_API void AdamwDenseKernel(
   const T* grad_ptr = grad.data<T>();
 
   auto adamw =
-      phi::jit::KernelFuncs<phi::jit::AdamWTuple<T>, phi::CPUPlace>::Cache().At(
+      phi::jit::KernelFuncs<phi::jit::AdamWTuple<T>, CPUPlace>::Cache().At(
           attr);
 
   static constexpr int64_t chunk_size = 512;
6 changes: 3 additions & 3 deletions in paddle/phi/kernels/cpu/crf_decoding_kernel.cc

@@ -41,9 +41,9 @@ void Decode(const Context& dev_ctx,
   DenseTensor track;
   track.Resize(emission_dims);
   int* track_value = dev_ctx.template Alloc<int>(&track);
-  auto ker = phi::jit::KernelFuncs<phi::jit::CRFDecodingTuple<T>,
-                                   phi::CPUPlace>::Cache()
-                 .At(tag_num);
+  auto ker =
+      phi::jit::KernelFuncs<phi::jit::CRFDecodingTuple<T>, CPUPlace>::Cache()
+          .At(tag_num);
   ker(static_cast<int>(seq_len), x, w, alpha_value, track_value, tag_num);
   T max_score = -std::numeric_limits<T>::max();
   int max_i = 0;
6 changes: 3 additions & 3 deletions in paddle/phi/kernels/cpu/eig.h

@@ -249,7 +249,7 @@ void MagmaEig(const Context& dev_ctx,
   // magma will modify original input, so copy to cpu at any case
   DenseTensor input_copy_cpu;
   input_copy_cpu.Resize(input.dims());
-  Copy(dev_ctx, input, phi::CPUPlace(), false, &input_copy_cpu);
+  Copy(dev_ctx, input, CPUPlace(), false, &input_copy_cpu);
 
   using RealT = typename phi::dtype::Real<T>;
   magma_vec_t jobvr = MagmaVec;

@@ -274,7 +274,7 @@
     phi::dtype::Real<T>* rwork_data = nullptr;
 
     rwork.Resize(common::make_ddim({lda * 2}));
-    auto cpu_place = phi::CPUPlace();
+    auto cpu_place = CPUPlace();
     phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
     auto* cpu_ctx = static_cast<phi::CPUContext*>(pool.Get(cpu_place));
     rwork_data = (*cpu_ctx).template Alloc<phi::dtype::Real<T>>(&rwork);

@@ -345,7 +345,7 @@ void ApplyEigKernelMagma(const Context& dev_ctx,
 
   DenseTensor vectors_row_major_cpu;
   vectors_row_major_cpu.Resize(input.dims());
-  auto cpu_place = phi::CPUPlace();
+  auto cpu_place = CPUPlace();
   phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
   auto* cpu_ctx = static_cast<phi::CPUContext*>(pool.Get(cpu_place));
   (*cpu_ctx).template Alloc<T>(&vectors_row_major_cpu);
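
The eig.h hunks keep the existing pattern: MAGMA factorizations overwrite their input, so the kernel snapshots the tensor into host memory before calling the solver. A standalone sketch of that defensive-copy idea (plain C++ with a hypothetical solver name, not Paddle's or MAGMA's API):

#include <vector>

// Hypothetical in-place solver standing in for a MAGMA factorization:
// it destroys the matrix it is given.
void destructive_solve(std::vector<double>& a) {
  for (auto& v : a) v *= 0.5;
}

// Mirrors MagmaEig's approach: work on a host-side copy so the caller's
// input survives the call (the role played by Copy(..., CPUPlace(), ...)).
std::vector<double> solve_preserving_input(const std::vector<double>& input) {
  std::vector<double> input_copy_cpu = input;  // snapshot before the solver
  destructive_solve(input_copy_cpu);
  return input_copy_cpu;
}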
4 changes: 2 additions & 2 deletions in paddle/phi/kernels/cpu/fusion_seqpool_concat_kernel.cc

@@ -55,8 +55,8 @@ void FusionSeqPoolConcatKernel(const Context& dev_ctx,
     attr.type = phi::jit::SeqPoolType::kSqrt;
   }
   auto seqpool =
-      phi::jit::KernelFuncs<phi::jit::SeqPoolTuple<T>, phi::CPUPlace>::Cache()
-          .At(attr);
+      phi::jit::KernelFuncs<phi::jit::SeqPoolTuple<T>, CPUPlace>::Cache().At(
+          attr);
   size_t n = ins.size();
   size_t dst_step_size = n * w;
   for (size_t i = 0; i < n; ++i) {
2 changes: 1 addition & 1 deletion in paddle/phi/kernels/cpu/index_select_impl.h

@@ -65,7 +65,7 @@ void IndexSelectInner(const Context& dev_ctx,
 
   DenseTensor index_cpu_copy;
   if (index.place().GetType() != AllocationType::CPU) {
-    Copy(dev_ctx, index, phi::CPUPlace(), true, &index_cpu_copy);
+    Copy(dev_ctx, index, CPUPlace(), true, &index_cpu_copy);
   }
   const IndexT* index_data = index.place().GetType() == AllocationType::CPU
                                  ? index.data<IndexT>()
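
IndexSelectInner stages the index tensor to host memory only when it does not already live there, then reads through a single pointer either way. A standalone sketch of that copy-only-if-needed branch (illustrative types, not Paddle's API):

#include <vector>

enum class Device { CPU, GPU };

struct Buffer {
  Device place;
  std::vector<int> data;
};

// Mirrors IndexSelectInner's branch: copy to host only when required, so the
// common already-on-CPU case pays for no allocation and no transfer.
const std::vector<int>& EnsureOnHost(const Buffer& index, Buffer* cpu_copy) {
  if (index.place != Device::CPU) {
    *cpu_copy = {Device::CPU, index.data};  // stand-in for the blocking Copy
    return cpu_copy->data;
  }
  return index.data;  // already on host
}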
4 changes: 2 additions & 2 deletions in paddle/phi/kernels/cpu/layer_norm_kernel.cc

@@ -133,8 +133,8 @@ void LayerNormKernel(const Context& dev_ctx,
   }
 
   auto ker =
-      phi::jit::KernelFuncs<phi::jit::LayerNormTuple<T>, phi::CPUPlace>::Cache()
-          .At(right);
+      phi::jit::KernelFuncs<phi::jit::LayerNormTuple<T>, CPUPlace>::Cache().At(
+          right);
   ker(x_tmp.data<T>(),
       out.data<T>(),
       mean_tmp.data<T>(),
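
All of the jit call sites in this diff share one shape: KernelFuncs<Tuple, Place>::Cache().At(key) hands back a kernel specialized for a key (here the row width right; elsewhere attr or tag_num), built once and reused on later lookups. A rough standalone sketch of that memoized lookup (illustrative only, not phi::jit's real implementation):

#include <functional>
#include <unordered_map>

using Kernel = std::function<void(const float*, float*, int)>;

// Stand-in for JIT code generation / best-implementation selection: build a
// kernel specialized for one row width.
Kernel MakeKernelFor(int width) {
  return [width](const float* x, float* y, int rows) {
    for (int r = 0; r < rows; ++r)
      for (int c = 0; c < width; ++c) y[r * width + c] = x[r * width + c];
  };
}

// Rough analogue of KernelFuncs<...>::Cache().At(key): create the kernel for
// a given key once, then return the cached copy on every later call.
// (Single-threaded sketch; the real cache's concurrency story may differ.)
const Kernel& CachedKernelAt(int width) {
  static std::unordered_map<int, Kernel> cache;
  auto it = cache.find(width);
  if (it == cache.end()) it = cache.emplace(width, MakeKernelFor(width)).first;
  return it->second;
}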
6 changes: 2 additions & 4 deletions in paddle/phi/kernels/cpu/sgd_kernel.cc

@@ -35,8 +35,7 @@ void sgd_dense_param_dense_grad_impl(const DenseTensor& param,
   T* out_data = param_out->data<T>();
 
   auto sgd =
-      phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, phi::CPUPlace>::Cache().At(
-          attr);
+      phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, CPUPlace>::Cache().At(attr);
   sgd(lr, param_data, grad_data, &rows_idx, out_data, &attr);
 }
 

@@ -76,8 +75,7 @@ void sgd_dense_param_sparse_grad_impl(const DenseTensor& param,
   attr.selected_rows_size = static_cast<int>(grad_rows.size());
 
   auto sgd =
-      phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, phi::CPUPlace>::Cache().At(
-          attr);
+      phi::jit::KernelFuncs<phi::jit::SgdTuple<T>, CPUPlace>::Cache().At(attr);
   sgd(lr, param_data, grad_data, rows_data, out_data, &attr);
 }
 
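
Both sgd_kernel.cc hunks call the same jit kernel; the sparse-grad variant passes the selected row indices so that only the rows that actually received gradient move. A standalone sketch of that row-sparse step (illustrative signature, not the phi::jit kernel's actual ABI):

#include <cstdint>
#include <vector>

// Row-sparse SGD: `grad` holds one packed row per entry of `rows`, and only
// those parameter rows are updated. Assumes `out` already holds a copy of
// `param`, so untouched rows pass through unchanged.
void SparseSgdStep(float lr,
                   const float* param,
                   const float* grad,
                   const std::vector<int64_t>& rows,
                   int64_t row_width,
                   float* out) {
  for (size_t i = 0; i < rows.size(); ++i) {
    const int64_t r = rows[i];
    for (int64_t c = 0; c < row_width; ++c) {
      out[r * row_width + c] =
          param[r * row_width + c] - lr * grad[i * row_width + c];
    }
  }
}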