diff --git a/paddle/fluid/framework/naive_executor.h b/paddle/fluid/framework/naive_executor.h
index 944d1c2647eaf9..261ec5de91e57f 100644
--- a/paddle/fluid/framework/naive_executor.h
+++ b/paddle/fluid/framework/naive_executor.h
@@ -86,7 +86,7 @@ class PADDLE_API NaiveExecutor {
            bool switch_stream = false);
 
   // Get an tensor to operating directly, without the need for feed_ops.
-  phi::DenseTensor* FindTensor(const std::string& name);
+  DenseTensor* FindTensor(const std::string& name);
 
   Scope* GetScope() { return scope_; }
 
@@ -116,9 +116,9 @@ class PADDLE_API NaiveExecutor {
   std::vector<PirHookFunc> pir_input_hookfuncs_;
 
   // Record information that tensor_a should ShareBufferWith tensor_b.
-  std::unordered_map<phi::DenseTensor*, std::unordered_set<phi::DenseTensor*>>
+  std::unordered_map<DenseTensor*, std::unordered_set<DenseTensor*>>
       reuse_cache_;
-  std::vector<phi::DenseTensor*> cluster_buffer_;
+  std::vector<DenseTensor*> cluster_buffer_;
 
   std::unique_ptr<InterpreterCore> interpreter_core_;
 };
diff --git a/paddle/fluid/framework/new_executor/feed_fetch_utils.cc b/paddle/fluid/framework/new_executor/feed_fetch_utils.cc
index 88d34c6ae75189..56c1baf1ddcc68 100644
--- a/paddle/fluid/framework/new_executor/feed_fetch_utils.cc
+++ b/paddle/fluid/framework/new_executor/feed_fetch_utils.cc
@@ -51,8 +51,8 @@ void SetColAttrForFeedFetchOps(std::shared_ptr<ProgramDesc> program_desc,
 void SplitFeedTensors(const std::vector<std::string>& feed_names,
                       const int64_t micro_batch_num,
                       Scope* scope,
-                      std::vector<std::vector<phi::DenseTensor>>* out) {
-  std::vector<phi::DenseTensor> feed_tensors;
+                      std::vector<std::vector<DenseTensor>>* out) {
+  std::vector<DenseTensor> feed_tensors;
   for (size_t i = 0; i < feed_names.size(); ++i) {
     auto feed_name = feed_names[i];
     auto feed_var = scope->GetVar(feed_name);
@@ -60,7 +60,7 @@ void SplitFeedTensors(const std::vector<std::string>& feed_names,
         feed_var,
         common::errors::NotFound("Variable %s should not be nullptr.",
                                  feed_names[i]));
-    feed_tensors.push_back(feed_var->Get<phi::DenseTensor>());
+    feed_tensors.push_back(feed_var->Get<DenseTensor>());
   }
 
   out->resize(micro_batch_num);
@@ -109,10 +109,10 @@ void FetchTensors(const std::vector<std::string>& job_fetch_names,
     int col = find(fetch_var_names.begin(), fetch_var_names.end(), var_name) -
               fetch_var_names.begin();
     auto* var = scope->FindVar(var_name);
-    if (var->IsType<phi::DenseTensor>()) {
-      auto& src = var->Get<phi::DenseTensor>();
+    if (var->IsType<DenseTensor>()) {
+      auto& src = var->Get<DenseTensor>();
       auto* dst =
-          &(PADDLE_GET(phi::DenseTensor, fetch_list->at(micro_batch_id)[col]));
+          &(PADDLE_GET(DenseTensor, fetch_list->at(micro_batch_id)[col]));
       if (src.IsInitialized()) {
         TensorCopy(src, CPUPlace(), dst);
         dst->set_lod(src.lod());
@@ -156,21 +156,21 @@ void MergeFetchTensors(const FetchUnmergedList& fetch_list,
 
   out->resize(fetch_list[0].size());
   for (size_t i = 0; i < fetch_list[0].size(); ++i) {
-    std::vector<const phi::DenseTensor*> tensors_ptr;
+    std::vector<const DenseTensor*> tensors_ptr;
     for (auto micro_batch_id = 0; micro_batch_id < micro_batch_num;
          ++micro_batch_id) {
       tensors_ptr.push_back(
-          &PADDLE_GET_CONST(phi::DenseTensor, fetch_list[micro_batch_id][i]));
+          &PADDLE_GET_CONST(DenseTensor, fetch_list[micro_batch_id][i]));
     }
-    phi::DenseTensor merged_tensor;
+    DenseTensor merged_tensor;
     MergeTensors(tensors_ptr, CPUPlace(), &merged_tensor);
     out->at(i) = std::move(merged_tensor);
   }
 }
 
-void MergeTensors(const std::vector<const phi::DenseTensor*>& tensors,
+void MergeTensors(const std::vector<const DenseTensor*>& tensors,
                   const phi::Place dst_place,
-                  phi::DenseTensor* target) {
+                  DenseTensor* target) {
   PADDLE_ENFORCE_EQ(
       tensors.empty(),
       false,
@@ -201,7 +201,7 @@ void MergeTensors(const std::vector<const phi::DenseTensor*>& tensors,
         new_type,
         framework::TransToProtoVarType(t->dtype()),
         common::errors::InvalidArgument(
-            "phi::DenseTensor data type does not match, expected type is %s, "
+            "DenseTensor data type does not match, expected type is %s, "
            "actual "
            "type is %s.",
            DataTypeToString(new_type),
@@ -210,7 +210,7 @@ void MergeTensors(const std::vector<const phi::DenseTensor*>& tensors,
         new_layout,
         t->layout(),
         common::errors::InvalidArgument(
-            "phi::DenseTensor layout does not match, expected layout is %s, "
+            "DenseTensor layout does not match, expected layout is %s, "
            "actual layout is %s.",
            common::DataLayoutToString(new_layout),
            common::DataLayoutToString(t->layout())));
diff --git a/paddle/phi/core/dense_tensor.h b/paddle/phi/core/dense_tensor.h
index 079486b44468d1..e1544e4e0f9d6c 100644
--- a/paddle/phi/core/dense_tensor.h
+++ b/paddle/phi/core/dense_tensor.h
@@ -339,3 +339,7 @@ class PADDLE_API DenseTensor : public TensorBase,
 };
 
 }  // namespace phi
+
+namespace paddle {
+using DenseTensor = phi::DenseTensor;
+}
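
The final hunk is what lets the earlier renames compile: dense_tensor.h now re-exports phi::DenseTensor into the paddle namespace, so code nested under paddle:: (for example paddle::framework, where NaiveExecutor and the feed/fetch utilities live) can spell the type as plain DenseTensor. Below is a minimal, self-contained sketch of that aliasing pattern; the stand-in DenseTensor class and the MakeTensor helper are hypothetical illustrations, not Paddle code.

#include <iostream>
#include <string>
#include <type_traits>
#include <utility>

namespace phi {
// Stand-in for the real phi::DenseTensor, just enough for the demo.
class DenseTensor {
 public:
  explicit DenseTensor(std::string name) : name_(std::move(name)) {}
  const std::string& name() const { return name_; }

 private:
  std::string name_;
};
}  // namespace phi

namespace paddle {
// Mirrors the new lines in paddle/phi/core/dense_tensor.h: re-export the type.
using DenseTensor = phi::DenseTensor;
}  // namespace paddle

namespace paddle {
namespace framework {
// Code nested under paddle:: can now name the type without the phi:: prefix,
// which is what the NaiveExecutor and feed_fetch_utils hunks rely on.
inline DenseTensor MakeTensor(const std::string& name) {
  return DenseTensor(name);
}
}  // namespace framework
}  // namespace paddle

int main() {
  paddle::DenseTensor t = paddle::framework::MakeTensor("x");
  // Both spellings name the same type, so the rename is purely cosmetic.
  std::cout << t.name() << " alias-is-same-type: "
            << std::is_same<paddle::DenseTensor, phi::DenseTensor>::value
            << std::endl;
  return 0;
}

Because a using-alias only adds a second name for the same type, existing code that still writes phi::DenseTensor keeps compiling unchanged.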