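This PR mechanically shortens phi::DenseTensor to DenseTensor across the new-executor sources. For the hunks below to compile, an alias for phi::DenseTensor must already be visible inside the paddle::framework namespace; a minimal sketch of what that presumably looks like follows (the exact header and form of the declaration are an assumption, as it is not shown in this diff):

    // Hypothetical sketch of the alias the changed lines rely on; the real
    // declaration lives in a Paddle header that this diff does not touch.
    #include "paddle/phi/core/dense_tensor.h"  // defines phi::DenseTensor

    namespace paddle {
    namespace framework {
    using DenseTensor = phi::DenseTensor;
    }  // namespace framework
    }  // namespace paddle

Note that some occurrences stay fully qualified on the new side, e.g. const phi::DenseTensor& cond in AssertInstruction::Run() and new phi::DenseTensor() in GetBranchNumber(); only the lines shown as changed swap to the short spelling.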
7 changes: 3 additions & 4 deletions paddle/fluid/framework/new_executor/collect_shape_manager.cc
@@ -46,16 +46,15 @@ void CollectShapeManager::CollectShapeInfo(
   }
   auto var_name = value_exe_info->GetVarName(input.first);
   auto *var = scope->FindVar(var_name);
-  if (!var || !var->IsType<phi::DenseTensor>()) {
+  if (!var || !var->IsType<DenseTensor>()) {
     VLOG(3) << "input var is null : " << (var == nullptr);
-    VLOG(3) << "input var is dense_tensor : "
-            << (var->IsType<phi::DenseTensor>());
+    VLOG(3) << "input var is dense_tensor : " << (var->IsType<DenseTensor>());
     VLOG(3) << "input is null or not dense_tensor, jump it, and input id:"
             << input.first.impl();
     continue;
   }
 
-  auto tensor = var->Get<phi::DenseTensor>();
+  auto tensor = var->Get<DenseTensor>();
   if (!tensor.has_allocation() && !instr->NoNeedBuffer().count(input.first)) {
     VLOG(3) << "input tensor is has_allocation: "
             << (tensor.has_allocation());
2 changes: 1 addition & 1 deletion paddle/fluid/framework/new_executor/feed_fetch_utils.h
@@ -30,7 +30,7 @@ void SetColAttrForFeedFetchOps(std::shared_ptr<ProgramDesc> program_desc,
 void SplitFeedTensors(const std::vector<std::string>& feed_names,
                       const int64_t micro_batch_num,
                       Scope* scope,
-                      std::vector<std::vector<phi::DenseTensor>>* out);
+                      std::vector<std::vector<DenseTensor>>* out);
 
 void FetchTensors(const std::vector<std::string>& job_fetch_names,
                   const std::vector<std::string>& fetch_var_names,
@@ -90,8 +90,8 @@ void FreeVariable(Variable* var) {
     return;
   }
 
-  if (var->IsType<phi::DenseTensor>()) {
-    Garbage garbage = var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder();
+  if (var->IsType<DenseTensor>()) {
+    Garbage garbage = var->GetMutable<DenseTensor>()->MoveMemoryHolder();
   } else if (
       var->IsType<
           operators::reader::
@@ -87,8 +87,8 @@ void InterpreterCoreEventGarbageCollector::Add(Variable* var,
     return;
   }
 
-  if (var->IsType<phi::DenseTensor>()) {
-    Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder(), event, ctx);
+  if (var->IsType<DenseTensor>()) {
+    Add(var->GetMutable<DenseTensor>()->MoveMemoryHolder(), event, ctx);
   } else if (
       var->IsType<
           operators::reader::
@@ -32,8 +32,8 @@ void InterpreterCoreFastGarbageCollector::Add(Variable* var) {
     return;
   }
 
-  if (var->IsType<phi::DenseTensor>()) {
-    Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder());
+  if (var->IsType<DenseTensor>()) {
+    Add(var->GetMutable<DenseTensor>()->MoveMemoryHolder());
   } else if (
       var->IsType<
           operators::reader::
@@ -46,8 +46,8 @@ void InterpreterCoreNoEventGarbageCollector::Add(
     return;
   }
 
-  if (var->IsType<phi::DenseTensor>()) {
-    Add(var->GetMutable<phi::DenseTensor>()->MoveMemoryHolder(), ctx);
+  if (var->IsType<DenseTensor>()) {
+    Add(var->GetMutable<DenseTensor>()->MoveMemoryHolder(), ctx);
   } else if (
       var->IsType<
           operators::reader::
@@ -60,7 +60,7 @@ void AssertInstruction::Run() {
   }
 
   DeviceContext().Wait();
-  const phi::DenseTensor& cond = cond_var_->Get<phi::DenseTensor>();
+  const phi::DenseTensor& cond = cond_var_->Get<DenseTensor>();
 
   PADDLE_ENFORCE_EQ(
       cond.numel(),
@@ -87,7 +87,7 @@ void AssertInstruction::Run() {
   for (pir::Value val : inputs_data_val) {
     const std::string& name = value_exe_info_->GetVarName(val);
     const phi::DenseTensor& tensor =
-        value_exe_info_->GetVarByValue(val)->Get<phi::DenseTensor>();
+        value_exe_info_->GetVarByValue(val)->Get<DenseTensor>();
     formatter.Print(tensor, name);
   }
   const std::string& error_msg = [&]() -> std::string {
@@ -44,8 +44,8 @@ HasElementsInstruction::HasElementsInstruction(
 
   type_ = OpFuncType::kCpuSync;
 
-  bool_tensor_ = value_exe_info_->GetVarByValue(op_->result(0))
-                     ->GetMutable<phi::DenseTensor>();
+  bool_tensor_ =
+      value_exe_info_->GetVarByValue(op_->result(0))->GetMutable<DenseTensor>();
   bool_tensor_->Resize(phi::make_ddim({1}));
 
   auto stack_value =
@@ -219,16 +219,16 @@ void IfInstruction::Run() {
   }
 
   bool cond = true;
-  if (cond_var_->IsType<phi::DenseTensor>()) {
-    auto& cond_tensor = cond_var_->Get<phi::DenseTensor>();
+  if (cond_var_->IsType<DenseTensor>()) {
+    auto& cond_tensor = cond_var_->Get<DenseTensor>();
     if (phi::is_cpu_place(cond_tensor.place())) {
       cond = cond_tensor.data<bool>()[0];
     } else {
       // when phi::is_gpu_place(cond.place()) or
       // phi::is_xpu_place(cond.place()) is true
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
     defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_CUSTOM_DEVICE)
-      phi::DenseTensor cpu_cond;
+      DenseTensor cpu_cond;
       paddle::framework::TensorCopySync(cond_tensor, CPUPlace(), &cpu_cond);
       cond = cpu_cond.data<bool>()[0];
 #else
@@ -242,7 +242,7 @@
     auto& cond_array = cond_var_->Get<VariableRefArray>();
     cond = std::all_of(
         cond_array.begin(), cond_array.end(), [](const Variable* t) {
-          return t->Get<phi::DenseTensor>().numel() != 0;
+          return t->Get<DenseTensor>().numel() != 0;
         });
   }
   if (cond) {
@@ -51,7 +51,7 @@ SelectInputInstruction::SelectInputInstruction(
   SetOutputs(outputs);
 }
 
-inline int GetBranchNumber(const phi::DenseTensor &mask) {
+inline int GetBranchNumber(const DenseTensor &mask) {
   PADDLE_ENFORCE_EQ(
       mask.numel(),
       1,
@@ -64,7 +64,7 @@ inline int GetBranchNumber(const phi::DenseTensor &mask) {
     return mask.data<int>()[0];
   }
   // when phi::is_gpu_place(mask.place()) is true
-  std::unique_ptr<phi::DenseTensor> cpu_mask{new phi::DenseTensor()};
+  std::unique_ptr<DenseTensor> cpu_mask{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
     defined(PADDLE_WITH_CUSTOM_DEVICE) || defined(PADDLE_WITH_XPU)
   framework::TensorCopySync(mask, CPUPlace(), cpu_mask.get());
@@ -81,8 +81,8 @@ class AssignFunctor {
  public:
   explicit AssignFunctor(Variable *out) : out_(out) {}
 
-  void operator()(const phi::DenseTensor &dense_tensor) const {
-    auto &out_tensor = *out_->GetMutable<phi::DenseTensor>();
+  void operator()(const DenseTensor &dense_tensor) const {
+    auto &out_tensor = *out_->GetMutable<DenseTensor>();
     copy_tensor(dense_tensor, &out_tensor);
   }
 
@@ -113,8 +113,7 @@ }
   }
 
  private:
-  void copy_tensor(const phi::DenseTensor &dense_tensor,
-                   phi::DenseTensor *out) const {
+  void copy_tensor(const DenseTensor &dense_tensor, DenseTensor *out) const {
     if (!dense_tensor.IsInitialized()) return;
     auto &out_tensor = *out;
     TensorCopy(dense_tensor, dense_tensor.place(), &out_tensor);
@@ -130,7 +129,7 @@ void SelectInputInstruction::Run() {
     CUDAErrorCheck("SelectInputInstruction begin");
   }
 
-  auto &mask = mask_->Get<phi::DenseTensor>();
+  auto &mask = mask_->Get<DenseTensor>();
   size_t output_branch = static_cast<size_t>(GetBranchNumber(mask));
   PADDLE_ENFORCE_LT(
       output_branch,
@@ -50,7 +50,7 @@ SelectOutputInstruction::SelectOutputInstruction(
   SetOutputs(outputs);
 }
 
-inline int GetBranchNumber(const phi::DenseTensor &mask) {
+inline int GetBranchNumber(const DenseTensor &mask) {
   PADDLE_ENFORCE_EQ(
       mask.numel(),
       1,
@@ -63,7 +63,7 @@ inline int GetBranchNumber(const phi::DenseTensor &mask) {
     return mask.data<int>()[0];
   }
   // when phi::is_gpu_place(mask.place()) is true
-  std::unique_ptr<phi::DenseTensor> cpu_mask{new phi::DenseTensor()};
+  std::unique_ptr<DenseTensor> cpu_mask{new phi::DenseTensor()};
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
     defined(PADDLE_WITH_CUSTOM_DEVICE) || defined(PADDLE_WITH_XPU)
   framework::TensorCopySync(mask, CPUPlace(), cpu_mask.get());
@@ -80,8 +80,8 @@ class AssignFunctor {
  public:
   explicit AssignFunctor(Variable *out) : out_(out) {}
 
-  void operator()(const phi::DenseTensor &dense_tensor) const {
-    auto &out_tensor = *out_->GetMutable<phi::DenseTensor>();
+  void operator()(const DenseTensor &dense_tensor) const {
+    auto &out_tensor = *out_->GetMutable<DenseTensor>();
     copy_tensor(dense_tensor, &out_tensor);
   }
 
@@ -112,8 +112,7 @@ }
   }
 
  private:
-  void copy_tensor(const phi::DenseTensor &dense_tensor,
-                   phi::DenseTensor *out) const {
+  void copy_tensor(const DenseTensor &dense_tensor, DenseTensor *out) const {
     if (!dense_tensor.IsInitialized()) return;
     auto &out_tensor = *out;
     TensorCopy(dense_tensor, dense_tensor.place(), &out_tensor);
@@ -129,7 +128,7 @@ void SelectOutputInstruction::Run() {
     CUDAErrorCheck("SelectOutputInstruction begin");
   }
 
-  auto &mask = mask_->Get<phi::DenseTensor>();
+  auto &mask = mask_->Get<DenseTensor>();
   size_t output_branch = static_cast<size_t>(GetBranchNumber(mask));
   PADDLE_ENFORCE_LE(
       output_branch,
@@ -80,9 +80,9 @@ static std::stack<const Variable*> PopElements(VariableRefArray* var_array,
   return rtn;
 }
 void ShareVarData(const Variable* src_var, Variable* dst_var) {
-  if (src_var->IsType<phi::DenseTensor>()) {
-    auto& src_tensor = src_var->Get<phi::DenseTensor>();
-    auto* tmp_dst_tensor = dst_var->GetMutable<phi::DenseTensor>();
+  if (src_var->IsType<DenseTensor>()) {
+    auto& src_tensor = src_var->Get<DenseTensor>();
+    auto* tmp_dst_tensor = dst_var->GetMutable<DenseTensor>();
     if (src_tensor.numel() == 0) {
       tmp_dst_tensor->set_meta(src_tensor.meta());
       if (!src_tensor.IsInitialized()) {
@@ -154,9 +154,9 @@ WhileInstruction::WhileInstruction(
 
 void WhileInstruction::ShareInputsToOutputs() {
   for (size_t i = 0; i < outputs_.size(); ++i) {
-    if (inputs_[i]->IsType<phi::DenseTensor>()) {
-      outputs_[i]->GetMutable<phi::DenseTensor>()->ShareDataWith(
-          inputs_[i]->Get<phi::DenseTensor>());
+    if (inputs_[i]->IsType<DenseTensor>()) {
+      outputs_[i]->GetMutable<DenseTensor>()->ShareDataWith(
+          inputs_[i]->Get<DenseTensor>());
     } else if (inputs_[i]->IsType<phi::TensorArray>()) {
       const auto& input_array = inputs_[i]->Get<phi::TensorArray>();
       auto* output_array = outputs_[i]->GetMutable<phi::TensorArray>();
@@ -174,9 +174,9 @@ void WhileInstruction::ShareOutputsToBlockArgs() {
     auto var_name = body_inter_->GetNameByValue(block_arg);
     auto* inner_var = body_inter_->local_scope()->GetVar(var_name);
 
-    if (outputs_[i]->IsType<phi::DenseTensor>()) {
-      inner_var->GetMutable<phi::DenseTensor>()->ShareDataWith(
-          outputs_[i]->Get<phi::DenseTensor>());
+    if (outputs_[i]->IsType<DenseTensor>()) {
+      inner_var->GetMutable<DenseTensor>()->ShareDataWith(
+          outputs_[i]->Get<DenseTensor>());
     } else if (outputs_[i]->IsType<phi::TensorArray>()) {
       const auto& outer_array = outputs_[i]->Get<phi::TensorArray>();
       auto* inner_array = inner_var->GetMutable<phi::TensorArray>();
@@ -192,8 +192,8 @@
 
 void WhileInstruction::ShareConditionData() {
   auto inner_cond_var = body_inter_->local_scope()->GetVar(inner_cond_);
-  cond_var_->GetMutable<phi::DenseTensor>()->ShareDataWith(
-      inner_cond_var->Get<phi::DenseTensor>());
+  cond_var_->GetMutable<DenseTensor>()->ShareDataWith(
+      inner_cond_var->Get<DenseTensor>());
 }
 
 void WhileInstruction::SetOutputHooks(
@@ -228,7 +228,7 @@ void WhileInstruction::Run() {
   }
 
   VLOG(6) << "while instruction start loop ...";
-  while (GetCondData(cond_var_->Get<phi::DenseTensor>())) {
+  while (GetCondData(cond_var_->Get<DenseTensor>())) {
     VLOG(6) << "while instruction pass args to body block";
     ShareOutputsToBlockArgs();
     VLOG(6) << "while instruction interpretercore run";
@@ -54,7 +54,7 @@ YieldInstruction::YieldInstruction(size_t id,
         "_input_" + std::to_string(i) + "_";
     Variable *fake_var = value_exe_info_->GetScope()->Var(new_name);
     if (out_type.isa<paddle::dialect::AllocatedDenseTensorType>()) {
-      fake_var->GetMutable<phi::DenseTensor>();
+      fake_var->GetMutable<DenseTensor>();
       input_vars_.push_back(fake_var);
     } else {
       PADDLE_THROW(common::errors::Unimplemented(
@@ -121,16 +121,15 @@ void YieldInstruction::Run() {
   for (size_t i = 0; i < input_vars_.size(); ++i) {
     if (input_vars_[i] == nullptr) {
       output_vars_[i] = nullptr;
-    } else if (input_vars_[i]->IsType<phi::DenseTensor>()) {
+    } else if (input_vars_[i]->IsType<DenseTensor>()) {
       if (input_vars_[i]->IsInitialized() &&
-          !input_vars_[i]->Get<phi::DenseTensor>().initialized()) {
+          !input_vars_[i]->Get<DenseTensor>().initialized()) {
         // Handles the case where the input is a NULL VALUE: build a fake tensor
-        FullFakeTensor<paddle::dialect::AllocatedDenseTensorType,
-                       phi::DenseTensor>(
+        FullFakeTensor<paddle::dialect::AllocatedDenseTensorType, DenseTensor>(
             value_exe_info_->GetValueByVar(output_vars_[i]), output_vars_[i]);
       } else {
-        output_vars_[i]->GetMutable<phi::DenseTensor>()->ShareDataWith(
-            input_vars_[i]->Get<phi::DenseTensor>());
+        output_vars_[i]->GetMutable<DenseTensor>()->ShareDataWith(
+            input_vars_[i]->Get<DenseTensor>());
       }
     } else if (input_vars_[i]->IsType<phi::TensorArray>()) {
       const auto &inner_array = input_vars_[i]->Get<phi::TensorArray>();
@@ -174,8 +174,8 @@ void CudaGraphInstruction::Run() {
   if (cuda_graph_ != nullptr && *cuda_graph_state_ref_ == 3) {
     VLOG(4) << "Start replaying cuda graph @" << cuda_graph_.get();
     for (size_t i = 0; i < input_vars_.size(); ++i) {
-      if (input_vars_[i]->IsType<phi::DenseTensor>()) {
-        auto* tensor = input_vars_[i]->GetMutable<phi::DenseTensor>();
+      if (input_vars_[i]->IsType<DenseTensor>()) {
+        auto* tensor = input_vars_[i]->GetMutable<DenseTensor>();
         if (tensor->data() != input_tensors_.at(i).data()) {
           LOG(WARNING) << "The input [" << i << "] tensor addr for "
                        << "cuda graph is changed. Pay attention to this!";
@@ -192,8 +192,7 @@
 
     // set the output tensors into scope
     for (size_t i = 0; i < output_vars_.size(); ++i) {
-      *(output_vars_[i]->GetMutable<phi::DenseTensor>()) =
-          output_tensors_.at(i);
+      *(output_vars_[i]->GetMutable<DenseTensor>()) = output_tensors_.at(i);
     }
     VLOG(4) << "Finish replaying cuda graph";
     return;
@@ -206,10 +205,10 @@
       place_, cudaStreamCaptureModeRelaxed, cuda_graph_capture_pool_id_);
 
   auto RecordTensorsForReplay = [&](const std::vector<Variable*>& vars) {
-    std::vector<phi::DenseTensor> record_tensors;
+    std::vector<DenseTensor> record_tensors;
     record_tensors.reserve(vars.size());
     for (auto& var : vars) {
-      auto& tensor = var->Get<phi::DenseTensor>();
+      auto& tensor = var->Get<DenseTensor>();
       const auto& holder = tensor.Holder();
       // Note: new_holder only record the memory address of the tensor for
      // cuda graph, original tensor memory will be freed to allocator after
@@ -72,8 +72,8 @@ class CudaGraphInstruction : public InstructionBase {
   std::vector<std::string> skip_gc_names_;
 
   std::unique_ptr<phi::backends::gpu::CUDAGraph> cuda_graph_ = nullptr;
-  std::vector<phi::DenseTensor> input_tensors_;
-  std::vector<phi::DenseTensor> output_tensors_;
+  std::vector<DenseTensor> input_tensors_;
+  std::vector<DenseTensor> output_tensors_;
 };
 
 }  // namespace framework
@@ -82,7 +82,7 @@ void BuildEngineInputOutputValue(pir::Operation *op,
 void BuildEngineValueMap(
     pir::Operation *op,
     ValueExecutionInfo *value_exec_info,
-    std::unordered_map<pir::Value, std::vector<phi::DenseTensor *>>
+    std::unordered_map<pir::Value, std::vector<DenseTensor *>>
         &engine_value_to_tensors,
     std::unordered_map<pir::Value, std::vector<std::string>>
         &engine_value_to_var_names) {
@@ -91,20 +91,19 @@
     std::string var_name = kv.second;
 
     auto var = value_exec_info->GetVarByValue(value);
-    if (var->IsType<phi::DenseTensor>()) {
-      const phi::DenseTensor *tensor = &(var->Get<phi::DenseTensor>());
-      engine_value_to_tensors[value] = {const_cast<phi::DenseTensor *>(tensor)};
+    if (var->IsType<DenseTensor>()) {
+      const DenseTensor *tensor = &(var->Get<DenseTensor>());
+      engine_value_to_tensors[value] = {const_cast<DenseTensor *>(tensor)};
       engine_value_to_var_names[value] = {var_name};
       VLOG(6) << "Build engine value map for " << var_name;
     } else if (var->IsType<VariableRefArray>()) {
-      std::vector<phi::DenseTensor *> tensors;
+      std::vector<DenseTensor *> tensors;
       std::vector<std::string> var_names;
       auto &variable_array = var->Get<VariableRefArray>();
       for (size_t i = 0; i < variable_array.size(); ++i) {
-        if (variable_array[i]->IsType<phi::DenseTensor>()) {
-          const phi::DenseTensor *tensor =
-              &(variable_array[i]->Get<phi::DenseTensor>());
-          tensors.emplace_back(const_cast<phi::DenseTensor *>(tensor));
+        if (variable_array[i]->IsType<DenseTensor>()) {
+          const DenseTensor *tensor = &(variable_array[i]->Get<DenseTensor>());
+          tensors.emplace_back(const_cast<DenseTensor *>(tensor));
           auto var_name_i = value_exec_info->GetVarName(variable_array[i]);
           var_names.emplace_back(var_name_i);
           VLOG(6) << "Build engine value map for Variable[" << i