paddle/fluid/framework/compiled_program.cc (2 changes: 1 addition & 1 deletion)
@@ -646,7 +646,7 @@ void CompiledProgram::BCastParamsToDevices(const std::vector<std::string> &vars,
 common::errors::PreconditionNotMet("Not compiled with BKCL."));
 #endif
 } else {
-phi::CPUPlace cpu;
+CPUPlace cpu;
 for (size_t i = 1; i < member_->places_.size(); ++i) {
 auto local_scope = member_->local_scopes_[i];
 auto *t = local_scope->Var(var)->GetMutable<phi::DenseTensor>();
paddle/fluid/framework/custom_operator.cc (2 changes: 1 addition & 1 deletion)
@@ -941,7 +941,7 @@ static void RegisterOperatorKernel(
 op_kernel_func = func;
 }
 RegisterOperatorKernelWithPlace(
-name, op_kernel_func, proto::VarType::RAW, phi::CPUPlace());
+name, op_kernel_func, proto::VarType::RAW, CPUPlace());
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 RegisterOperatorKernelWithPlace(
 name, op_kernel_func, proto::VarType::RAW, phi::GPUPlace());
paddle/fluid/framework/data_feed.cc (4 changes: 2 additions & 2 deletions)
@@ -1036,7 +1036,7 @@ void MultiSlotDataFeed::PutToFeedVec(
 const auto& offset = ins_vec[i].GetOffset();
 int total_instance = static_cast<int>(offset.back());
 VLOG(4) << "total_instance: " << total_instance;
-// phi::CPUPlace()
+// CPUPlace()
 VLOG(4) << "this->place_: " << this->place_;
 if (type[0] == 'f') { // float
 const auto& feasign = ins_vec[i].GetFloatData();
@@ -3121,7 +3121,7 @@ void SlotRecordInMemoryDataFeed::PackToScope(MiniBatchGpuPack* pack,
 lod.resize(1);
 lod[0].resize(offset_cols_size);
 phi::MixVector<size_t> mixv_lod(&lod[0]);
-memcpy(mixv_lod.MutableData(phi::CPUPlace()),
+memcpy(mixv_lod.MutableData(CPUPlace()),
 off_start_ptr,
 offset_cols_size * sizeof(size_t));
 }
paddle/fluid/framework/details/nan_inf_utils_detail.cc (2 changes: 1 addition & 1 deletion)
@@ -198,7 +198,7 @@ void CheckVarHasNanOrInf(const std::string& op_type,
 }

 float* cpu_data = new float[tensor->numel()];
-memory::Copy(phi::CPUPlace(),
+memory::Copy(CPUPlace(),
 static_cast<void*>(cpu_data),
 tensor->place(),
 static_cast<const void*>(tensor->data<float>()),
paddle/fluid/framework/device_worker.cc (6 changes: 3 additions & 3 deletions)
@@ -261,7 +261,7 @@ void DeviceWorker::DumpParam(const Scope& scope, const int batch_id) {
 }
 phi::DenseTensor cpu_tensor;
 if (phi::is_gpu_place(tensor->place())) {
-TensorCopySync(*tensor, phi::CPUPlace(), &cpu_tensor);
+TensorCopySync(*tensor, CPUPlace(), &cpu_tensor);
 tensor = &cpu_tensor;
 }
 int64_t len = tensor->numel();
@@ -376,7 +376,7 @@ void DeviceWorker::DumpField(
 }
 phi::DenseTensor cpu_tensor;
 if (phi::is_gpu_place(tensor->place())) {
-TensorCopySync(*tensor, phi::CPUPlace(), &cpu_tensor);
+TensorCopySync(*tensor, CPUPlace(), &cpu_tensor);
 cpu_tensor.set_lod(tensor->lod());
 tensor = &cpu_tensor;
 }
@@ -464,7 +464,7 @@ void DeviceWorker::DumpField(
 }
 phi::DenseTensor cpu_tensor;
 if (phi::is_gpu_place(tensor->place())) {
-TensorCopySync(*tensor, phi::CPUPlace(), &cpu_tensor);
+TensorCopySync(*tensor, CPUPlace(), &cpu_tensor);
 cpu_tensor.set_lod(tensor->lod());
 tensor = &cpu_tensor;
 }
paddle/fluid/framework/dlpack_tensor.cc (4 changes: 2 additions & 2 deletions)
@@ -153,7 +153,7 @@ static ::DLDataType GetDLDataTypeFromTypeIndex(phi::DataType type) {
 struct DLDeviceVisitor {
 using argument_type = const phi::Place &;
 using result_type = ::DLDevice;
-inline ::DLDevice operator()(const phi::CPUPlace &place) const {
+inline ::DLDevice operator()(const CPUPlace &place) const {
 ::DLDevice device;
 device.device_type = kDLCPU;
 device.device_id = 0;
@@ -276,7 +276,7 @@ ::DLDataType PhiDataTypeToDLDataType(phi::DataType dtype) {
 phi::Place DLDeviceToPlace(const ::DLDevice &dl_device) {
 phi::Place place;
 if (dl_device.device_type == kDLCPU) {
-place = phi::CPUPlace();
+place = CPUPlace();
 } else if (dl_device.device_type == kDLCUDA) {
 place = phi::GPUPlace(dl_device.device_id);
 } else if (dl_device.device_type == kDLCUDAHost) {
paddle/fluid/framework/downpour_worker.cc (4 changes: 2 additions & 2 deletions)
@@ -225,8 +225,8 @@ void DownpourWorker::FillSparseValue(size_t table_idx) {
 continue;
 }
 phi::DenseTensor* tensor_emb = var_emb->GetMutable<phi::DenseTensor>();
-float* ptr = tensor_emb->mutable_data<float>({len, table.emb_dim()},
-phi::CPUPlace());
+float* ptr =
+tensor_emb->mutable_data<float>({len, table.emb_dim()}, CPUPlace());
 memset(ptr, 0, sizeof(float) * len * table.emb_dim());
 auto& tensor_lod = tensor->lod()[0];
 LegacyLoD data_lod{tensor_lod};
paddle/fluid/framework/garbage_collector.cc (2 changes: 1 addition & 1 deletion)
@@ -39,7 +39,7 @@ GarbageCollector::GarbageCollector(const phi::Place &place,
 }
 }

-CPUGarbageCollector::CPUGarbageCollector(const phi::CPUPlace &place,
+CPUGarbageCollector::CPUGarbageCollector(const CPUPlace &place,
 size_t max_memory_size)
 : GarbageCollector(place, max_memory_size) {}
paddle/fluid/framework/garbage_collector.h (2 changes: 1 addition & 1 deletion)
@@ -59,7 +59,7 @@ class GarbageCollector {

 class CPUGarbageCollector : public GarbageCollector {
 public:
-CPUGarbageCollector(const phi::CPUPlace &place, size_t max_memory_size);
+CPUGarbageCollector(const CPUPlace &place, size_t max_memory_size);

 protected:
 void ClearCallback(const std::function<void()> &callback) override;
paddle/fluid/framework/multi_trainer.cc (8 changes: 4 additions & 4 deletions)
@@ -296,15 +296,15 @@ template <typename T>
 void MultiTrainer::MergeToRootScope(phi::DenseTensor* root_tensor,
 phi::DenseTensor* tensor) {
 phi::DenseTensor tmp_root;
-TensorCopy(*root_tensor, phi::CPUPlace(), &tmp_root);
+TensorCopy(*root_tensor, CPUPlace(), &tmp_root);
 T* tmp_root_data = tmp_root.data<T>();
 phi::DenseTensor tmp_tensor;
-TensorCopy(*tensor, phi::CPUPlace(), &tmp_tensor);
+TensorCopy(*tensor, CPUPlace(), &tmp_tensor);
 T* data = tmp_tensor.data<T>();
 for (int i = 0; i < tmp_tensor.numel(); i++) {
 tmp_root_data[i] += data[i];
 }
-TensorCopy(tmp_root, phi::CPUPlace(), root_tensor);
+TensorCopy(tmp_root, CPUPlace(), root_tensor);
 }
 void MultiTrainer::MergeWorkerVars() {
 for (size_t i = 0; i < need_merge_var_names_.size(); i++) {
@@ -425,7 +425,7 @@ void MultiTrainer::ResetDataset(Dataset* dataset) {
 exit(-1); \
 } \
 phi::DenseTensor tmp_tensor; \
-TensorCopy(*thread_tensor, phi::CPUPlace(), &tmp_tensor); \
+TensorCopy(*thread_tensor, CPUPlace(), &tmp_tensor); \
 phi::funcs::set_constant(*dev_ctx_, thread_tensor, 0.0); \
 } \
 } while (0)
paddle/fluid/framework/new_executor/collect_shape_manager.cc (8 changes: 4 additions & 4 deletions)
@@ -109,15 +109,15 @@ void CollectShapeManager::CollectShapeInfo(
 if (phi::is_cpu_place(tensor.place())) {
 auto &int32_tensor = tensor;
 if (tensor.dtype() == phi::DataType::INT64) {
-auto *cpu_ctx = pool.Get(phi::CPUPlace());
+auto *cpu_ctx = pool.Get(CPUPlace());
 int32_tensor = phi::funcs::TransDataType(
 reinterpret_cast<const phi::CPUContext &>(*cpu_ctx),
 tensor,
 DataType::INT32);
 }
-paddle::memory::Copy(phi::CPUPlace(),
+paddle::memory::Copy(CPUPlace(),
 int32_host.data(),
-phi::CPUPlace(),
+CPUPlace(),
 int32_tensor.data<int>(),
 int32_tensor.numel() * sizeof(int));
 } else if (phi::is_gpu_place(tensor.place())) {
@@ -130,7 +130,7 @@
 tensor,
 DataType::INT32);
 }
-paddle::memory::Copy(phi::CPUPlace(),
+paddle::memory::Copy(CPUPlace(),
 int32_host.data(),
 int32_tensor.place(),
 int32_tensor.data<int>(),
paddle/fluid/framework/new_executor/feed_fetch_utils.cc (6 changes: 3 additions & 3 deletions)
@@ -114,7 +114,7 @@ void FetchTensors(const std::vector<std::string>& job_fetch_names,
 auto* dst =
 &(PADDLE_GET(phi::DenseTensor, fetch_list->at(micro_batch_id)[col]));
 if (src.IsInitialized()) {
-TensorCopy(src, phi::CPUPlace(), dst);
+TensorCopy(src, CPUPlace(), dst);
 dst->set_lod(src.lod());
 } else {
 VLOG(6) << "Found " << var_name
@@ -129,7 +129,7 @@ void FetchTensors(const std::vector<std::string>& job_fetch_names,
 &(PADDLE_GET(phi::TensorArray, fetch_list->at(micro_batch_id)[col]));
 dst->resize(src.size());
 for (size_t i = 0; i < src.size(); ++i) {
-TensorCopy(src[i], phi::CPUPlace(), &dst->at(i));
+TensorCopy(src[i], CPUPlace(), &dst->at(i));
 dst->at(i).set_lod(src[i].lod());
 }
 }
@@ -163,7 +163,7 @@ void MergeFetchTensors(const FetchUnmergedList& fetch_list,
 &PADDLE_GET_CONST(phi::DenseTensor, fetch_list[micro_batch_id][i]));
 }
 phi::DenseTensor merged_tensor;
-MergeTensors(tensors_ptr, phi::CPUPlace(), &merged_tensor);
+MergeTensors(tensors_ptr, CPUPlace(), &merged_tensor);
 out->at(i) = std::move(merged_tensor);
 }
 }
@@ -63,12 +63,12 @@ class CinnJitInstruction::FnPtrImpl {
 const auto& tensor = [&]() -> phi::DenseTensor {
 phi::DenseTensor new_tensor =
 *(kernel_tensor_args[binding_info.arg_idx]);
-if (new_tensor.place() == phi::CPUPlace()) {
+if (new_tensor.place() == CPUPlace()) {
 return new_tensor;
 }
 framework::TensorCopySync(
 *(kernel_tensor_args[binding_info.arg_idx]),
-phi::CPUPlace(),
+CPUPlace(),
 &new_tensor);
 return new_tensor;
 }();
@@ -61,7 +61,7 @@ void HasElementsInstruction::Run() {
 }

 phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
-bool* has_elements = pool.Get(phi::CPUPlace())->Alloc<bool>(bool_tensor_);
+bool* has_elements = pool.Get(CPUPlace())->Alloc<bool>(bool_tensor_);
 *has_elements = !stack_element_var_array_->empty();

 if (FLAGS_check_cuda_error) [[unlikely]] {
@@ -229,8 +229,7 @@ void IfInstruction::Run() {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
 defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_CUSTOM_DEVICE)
 phi::DenseTensor cpu_cond;
-paddle::framework::TensorCopySync(
-cond_tensor, phi::CPUPlace(), &cpu_cond);
+paddle::framework::TensorCopySync(cond_tensor, CPUPlace(), &cpu_cond);
 cond = cpu_cond.data<bool>()[0];
 #else
 PADDLE_THROW(common::errors::PreconditionNotMet(
@@ -67,7 +67,7 @@ inline int GetBranchNumber(const phi::DenseTensor &mask) {
 std::unique_ptr<phi::DenseTensor> cpu_mask{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
 defined(PADDLE_WITH_CUSTOM_DEVICE) || defined(PADDLE_WITH_XPU)
-framework::TensorCopySync(mask, phi::CPUPlace(), cpu_mask.get());
+framework::TensorCopySync(mask, CPUPlace(), cpu_mask.get());
 #else
 PADDLE_THROW(common::errors::Fatal(
 "This version of PaddlePaddle does NOT support GPU, "
@@ -66,7 +66,7 @@ inline int GetBranchNumber(const phi::DenseTensor &mask) {
 std::unique_ptr<phi::DenseTensor> cpu_mask{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
 defined(PADDLE_WITH_CUSTOM_DEVICE) || defined(PADDLE_WITH_XPU)
-framework::TensorCopySync(mask, phi::CPUPlace(), cpu_mask.get());
+framework::TensorCopySync(mask, CPUPlace(), cpu_mask.get());
 #else
 PADDLE_THROW(common::errors::Fatal(
 "This version of PaddlePaddle does NOT support GPU, "
@@ -152,7 +152,7 @@ static double GetDenseTensorEleSum(const Scope& scope,
 if (var->IsType<phi::DenseTensor>() &&
 var->Get<phi::DenseTensor>().initialized()) {
 phi::DenseTensor cpu_tensor;
-phi::CPUPlace place;
+CPUPlace place;
 paddle::framework::TensorCopy(
 var->Get<phi::DenseTensor>(), place, &cpu_tensor);
 phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
@@ -551,7 +551,7 @@ bool GetCondData(const phi::DenseTensor& cond) {
 std::unique_ptr<phi::DenseTensor> cpu_cond{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
 defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_CUSTOM_DEVICE)
-paddle::framework::TensorCopySync(cond, phi::CPUPlace(), cpu_cond.get());
+paddle::framework::TensorCopySync(cond, CPUPlace(), cpu_cond.get());
 #else
 PADDLE_THROW(common::errors::PreconditionNotMet(
 "This version of PaddlePaddle does NOT support GPU/XPU but got "
@@ -130,7 +130,7 @@ void OneDNNMixedPhiKernelInstruction::Run() {
 tmp_layout,
 *input,
 transed_tensor,
-phi::CPUPlace());
+CPUPlace());
 tmp_kernel_context.UpdataInput(i, transed_tensor);
 auto meta_tensor = phi::MetaTensor(transed_tensor);
 auto input_meta_tensor = phi::MetaTensor(input);
@@ -234,7 +234,7 @@ TensorRTEngineInstruction::TensorRTEngineInstruction(
 phi::DataType type_data = phi::DataType::FLOAT32;
 phi::DeviceContextPool &pool = phi::DeviceContextPool::Instance();
 const phi::DeviceContext *dev_ctx = nullptr;
-dev_ctx = pool.Get(phi::CPUPlace());
+dev_ctx = pool.Get(CPUPlace());
 dev_ctx->Alloc(tensor_temp, type_data);
 tensor_out.push_back(tensor_temp);
 }
@@ -491,14 +491,14 @@ void TensorRTEngineInstruction::BindInputTensor(
 nvinfer1::TensorIOMode::kINPUT) {
 shape_v.resize(input_tensor.numel());
 if (input_tensor.dtype() == phi::DataType::INT32) {
-phi::memory_utils::Copy(phi::CPUPlace(),
+phi::memory_utils::Copy(CPUPlace(),
 shape_v.data(),
 input_tensor.place(),
 input_tensor.data<int32_t>(),
 input_tensor.numel() * sizeof(int),
 nullptr);
 } else if (input_tensor.dtype() == phi::DataType::INT64 && support_int64) {
-phi::memory_utils::Copy(phi::CPUPlace(),
+phi::memory_utils::Copy(CPUPlace(),
 shape_v.data(),
 input_tensor.place(),
 input_tensor.data<int64_t>(),
@@ -514,7 +514,7 @@
 reinterpret_cast<const phi::GPUContext &>(*dev_ctx_),
 input_tensor,
 phi::DataType::INT32);
-phi::memory_utils::Copy(phi::CPUPlace(),
+phi::memory_utils::Copy(CPUPlace(),
 shape_v.data(),
 int32_tensor->place(),
 int32_tensor->data<int32_t>(),
@@ -211,7 +211,7 @@ bool TensorShouldBeFakeInitialized(const OperatorBase& op,

 if (op_type == "batch_norm" && parameter_name == "ReserveSpace") {
 if (dynamic_cast<const OperatorWithKernel*>(&op)->kernel_type()->place_ ==
-phi::CPUPlace()) {
+CPUPlace()) {
 VLOG(2) << "Skip fake initialization for: " << parameter_name;
 return false;
 }
@@ -266,7 +266,7 @@ bool TensorShouldBeFakeInitialized(const OperatorBase& op,
 return op.Attr<std::string>("pooltype") == "MEAN" &&
 dynamic_cast<const OperatorWithKernel*>(&op)
 ->kernel_type()
-->place_ != phi::CPUPlace();
+->place_ != CPUPlace();
 }

 return tensor && !IsExtendedTensor(*tensor);
@@ -342,7 +342,7 @@ void FakeInitializeTensor(const phi::DeviceContext& dev_ctx,
 }
 phi::Copy(*dev_ctx_for_copy, *tensor, place, /*blocking=*/true, tensor);
 } else {
-if (place == phi::CPUPlace()) {
+if (place == CPUPlace()) {
 dev_ctx.HostAlloc(tensor,
 dtype,
 /*requested_size=*/0,
@@ -721,7 +721,7 @@ void FakeInitializeOutputsForOperatorBase(
 std::dynamic_pointer_cast<operators::reader::BufferedReader>(
 reader->Get());
 phi::Place target_place =
-buffered_reader ? buffered_reader->GetPlace() : phi::CPUPlace();
+buffered_reader ? buffered_reader->GetPlace() : CPUPlace();

 auto& outputs = op.Outputs("Out");
 auto& var_types = reader->VarTypes();