diff --git a/paddle/fluid/imperative/all_reduce.cc b/paddle/fluid/imperative/all_reduce.cc
index 7f6ccc0d3549c7..e2b3bb9c436957 100644
--- a/paddle/fluid/imperative/all_reduce.cc
+++ b/paddle/fluid/imperative/all_reduce.cc
@@ -37,8 +37,8 @@ namespace paddle::imperative {
 
 static const phi::Place &GetVarPlace(const framework::Variable &src) {
-  if (src.IsType<phi::DenseTensor>()) {
-    return src.Get<phi::DenseTensor>().place();
+  if (src.IsType<DenseTensor>()) {
+    return src.Get<DenseTensor>().place();
 #if NCCL_VERSION_CODE >= 2212
   } else if (src.IsType<phi::SelectedRows>()) {
     return src.Get<phi::SelectedRows>().value().place();
@@ -52,8 +52,8 @@ static const phi::Place &GetVarPlace(const framework::Variable &src) {
   }
 }
 
-static void AllReduce(const phi::DenseTensor &src,
-                      phi::DenseTensor *dst,
+static void AllReduce(const DenseTensor &src,
+                      DenseTensor *dst,
                       const gpuStream_t stream,
                       const platform::NCCLComm *comm) {
   const auto &place = src.place();
@@ -224,14 +224,12 @@ void AllReduce(const framework::Variable &src,
       platform::NCCLCommContext::Instance().Get(ring_id, place);
   gpuStream_t stream = (use_calc_stream ? dev_ctx->stream() : comm->stream());
 
-  if (src.IsType<phi::DenseTensor>()) {
-    if (!dst->IsType<phi::DenseTensor>()) {
+  if (src.IsType<DenseTensor>()) {
+    if (!dst->IsType<DenseTensor>()) {
       dst->Clear();
     }
-    AllReduce(src.Get<phi::DenseTensor>(),
-              dst->GetMutable<phi::DenseTensor>(),
-              stream,
-              comm);
+    AllReduce(
+        src.Get<DenseTensor>(), dst->GetMutable<DenseTensor>(), stream, comm);
 #if NCCL_VERSION_CODE >= 2212
   } else if (src.IsType<phi::SelectedRows>()) {
     if (&src != dst) {
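Note on the pattern above, which repeats through the rest of this diff: every `+` line drops the `phi::` qualifier from `DenseTensor`, both in declarations and in the `IsType<>`/`Get<>`/`GetMutable<>` template arguments, while `phi::SelectedRows` keeps its full qualification. The shorter spelling only compiles if the name `DenseTensor` is already visible inside `paddle::imperative`, presumably through a namespace-scope alias introduced elsewhere in this change. A minimal sketch of that assumption, with hypothetical stand-in types rather than the real Paddle headers:

// Sketch only: assumes an alias like this is visible to the files below
// (hypothetical names, simplified stand-in types).
namespace phi {
class DenseTensor { /* ... */ };
}  // namespace phi

namespace paddle {
namespace imperative {

using DenseTensor = phi::DenseTensor;  // assumed alias enabling the rename

void Demo() {
  DenseTensor t;  // same type as phi::DenseTensor, just spelled shorter
  (void)t;
}

}  // namespace imperative
}  // namespace paddle

With such an alias in scope, `Get<DenseTensor>()` and `Get<phi::DenseTensor>()` name the same instantiation, so the rename is purely textual and changes no behavior.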
diff --git a/paddle/fluid/imperative/basic_engine.cc b/paddle/fluid/imperative/basic_engine.cc
index 9062f014484914..0317bf8e45217c 100644
--- a/paddle/fluid/imperative/basic_engine.cc
+++ b/paddle/fluid/imperative/basic_engine.cc
@@ -100,9 +100,9 @@ void BasicEngine::Init(
         true,
         common::errors::NotFound("Tensor %s has no gradient", var->Name()));
 
-    auto& fwd_var = var->Var().Get<phi::DenseTensor>();
+    auto& fwd_var = var->Var().Get<DenseTensor>();
     auto* grad_var =
-        var->GradVarBase()->MutableVar()->GetMutable<phi::DenseTensor>();
+        var->GradVarBase()->MutableVar()->GetMutable<DenseTensor>();
     VLOG(6) << "init loss grad:" << var->GradVarBase()->Name()
             << " as stop_gradient false";
     var->GradVarBase()->InnerSetOverriddenStopGradient(false);
@@ -112,7 +112,7 @@ void BasicEngine::Init(
       grad_var->mutable_data(fwd_var.place(), fwd_var.type());
       phi::funcs::set_constant(*dev_ctx, grad_var, 1.0f);
     } else {
-      paddle::framework::TensorCopy(grad_tensor->Var().Get<phi::DenseTensor>(),
+      paddle::framework::TensorCopy(grad_tensor->Var().Get<DenseTensor>(),
                                     fwd_var.place(),
                                     *dev_ctx,
                                     grad_var);
@@ -149,10 +149,9 @@ void BasicEngine::CheckBackwardInputs(const OpBase& op) {
       }
 
       auto* inner_var = var->MutableVar();
-      phi::DenseTensor* tensor = nullptr;
-      if (!inner_var->IsInitialized() ||
-          inner_var->IsType<phi::DenseTensor>()) {
-        tensor = inner_var->GetMutable<phi::DenseTensor>();
+      DenseTensor* tensor = nullptr;
+      if (!inner_var->IsInitialized() || inner_var->IsType<DenseTensor>()) {
+        tensor = inner_var->GetMutable<DenseTensor>();
       }
 
       if (tensor && !tensor->IsInitialized()) {
@@ -340,8 +339,8 @@ static std::shared_ptr<NameVarMap<VariableWrapper>> CallGradientHooks(
 
 static bool IsInputCanInplace(const std::shared_ptr<VarBase>& var) {
   auto* inner_var = var->MutableVar();
-  if (inner_var->IsInitialized() && inner_var->IsType<phi::DenseTensor>()) {
-    auto tensor = inner_var->GetMutable<phi::DenseTensor>();
+  if (inner_var->IsInitialized() && inner_var->IsType<DenseTensor>()) {
+    auto tensor = inner_var->GetMutable<DenseTensor>();
     if (tensor->IsInitialized()) {
       return true;
     }
@@ -358,7 +357,7 @@ static void PerformBackwardInplace(const std::string& op_type,
   if (infer_inplace) {
     auto in_to_outs = infer_inplace(true);
     for (auto& pair : in_to_outs) {
-      phi::DenseTensor *in_tensor = nullptr, *out_tensor = nullptr;
+      DenseTensor *in_tensor = nullptr, *out_tensor = nullptr;
       for (auto& p : ins) {
         if (p.first == pair.first) {
           // has at least one var
@@ -368,8 +367,7 @@ static void PerformBackwardInplace(const std::string& op_type,
             // the refcount of var to be inplaced should be 1
             if (in_var.use_count() == 1) {
               if (IsInputCanInplace(in_var)) {
-                in_tensor =
-                    in_var->MutableVar()->GetMutable<phi::DenseTensor>();
+                in_tensor = in_var->MutableVar()->GetMutable<DenseTensor>();
               }
             }
           }
@@ -383,8 +381,7 @@ static void PerformBackwardInplace(const std::string& op_type,
         if (!p.second.empty() && p.second[0]) {
           auto& out_var = p.second[0];
           if (out_var->Type() == framework::proto::VarType::DENSE_TENSOR) {
-            out_tensor =
-                out_var->MutableVar()->GetMutable<phi::DenseTensor>();
+            out_tensor = out_var->MutableVar()->GetMutable<DenseTensor>();
           }
         }
       }
diff --git a/paddle/fluid/imperative/bkcl_context.cc b/paddle/fluid/imperative/bkcl_context.cc
index c960b105b0740b..9f276290978314 100644
--- a/paddle/fluid/imperative/bkcl_context.cc
+++ b/paddle/fluid/imperative/bkcl_context.cc
@@ -33,8 +33,8 @@ namespace paddle {
 namespace imperative {
 
-static void AllReduce(const phi::DenseTensor &src,
-                      phi::DenseTensor *dst,
+static void AllReduce(const DenseTensor &src,
+                      DenseTensor *dst,
                       const XPUStream stream,
                       const platform::BKCLComm *comm) {
   const auto &place = src.place();
@@ -162,14 +162,12 @@ void BKCLParallelContext::AllReduceByStream(const framework::Variable &src,
   XPUStream stream =
       use_calc_stream ? dev_ctx->x_context()->xpu_stream : comm->stream();
 
-  if (src.IsType<phi::DenseTensor>()) {
-    if (!dst->IsType<phi::DenseTensor>()) {
+  if (src.IsType<DenseTensor>()) {
+    if (!dst->IsType<DenseTensor>()) {
       dst->Clear();
     }
-    AllReduce(src.Get<phi::DenseTensor>(),
-              dst->GetMutable<phi::DenseTensor>(),
-              stream,
-              comm);
+    AllReduce(
+        src.Get<DenseTensor>(), dst->GetMutable<DenseTensor>(), stream, comm);
   } else {
     PADDLE_THROW(common::errors::InvalidArgument(
         "XPU unsupported variable type %s for imperative allreduce, only "
@@ -180,7 +178,7 @@ void BKCLParallelContext::AllReduceByStream(const framework::Variable &src,
 
 void BKCLParallelContext::Broadcast(framework::Variable *src, int ring_id) {
   VLOG(3) << "/// DEBUG /// start inter broadcast with ring_id: " << ring_id;
-  phi::DenseTensor *src_tensor = src->GetMutable<phi::DenseTensor>();
+  DenseTensor *src_tensor = src->GetMutable<DenseTensor>();
   const auto &place = src_tensor->place();
   platform::BKCLComm *comm =
       platform::BKCLCommContext::Instance().Get(ring_id, place);
diff --git a/paddle/fluid/imperative/dygraph_grad_maker.h b/paddle/fluid/imperative/dygraph_grad_maker.h
index fcb03521a8ea51..a5a95e5045b0df 100644
--- a/paddle/fluid/imperative/dygraph_grad_maker.h
+++ b/paddle/fluid/imperative/dygraph_grad_maker.h
@@ -186,8 +186,8 @@ class GradOpBaseMakerBase {
 
         if (!is_input) {
           auto* tensor =
-              grad_var_base_tmp->MutableVar()->GetMutable<phi::DenseTensor>();
-          tensor->Resize(var_base_temp->Var().Get<phi::DenseTensor>().dims());
+              grad_var_base_tmp->MutableVar()->GetMutable<DenseTensor>();
+          tensor->Resize(var_base_temp->Var().Get<DenseTensor>().dims());
         }
         vec_temp.emplace_back(grad_var_base_tmp);
       } else {
@@ -363,14 +363,13 @@ class TracedGradOp {
     } else if (var_wrapper->InplaceVersionSnapshot() ==
                var_wrapper->MutableVar()->CurrentInplaceVersion()) {
       return var_wrapper;
-    } else if (var_wrapper->MutableVar()->IsType<phi::DenseTensor>() ||
+    } else if (var_wrapper->MutableVar()->IsType<DenseTensor>() ||
                var_wrapper->MutableVar()->IsType<phi::SelectedRows>()) {
-      auto* tensor =
-          var_wrapper->MutableVar()->IsType<phi::DenseTensor>()
-              ? var_wrapper->MutableVar()->GetMutable<phi::DenseTensor>()
-              : var_wrapper->MutableVar()
-                    ->GetMutable<phi::SelectedRows>()
-                    ->mutable_value();
+      auto* tensor = var_wrapper->MutableVar()->IsType<DenseTensor>()
+                         ? var_wrapper->MutableVar()->GetMutable<DenseTensor>()
+                         : var_wrapper->MutableVar()
+                               ->GetMutable<phi::SelectedRows>()
+                               ->mutable_value();
       if (!tensor->IsInitialized()) {
         return var_wrapper;
       }
diff --git a/paddle/fluid/imperative/gloo_context.cc b/paddle/fluid/imperative/gloo_context.cc
index e6e4e4e7644927..03606725dcf045 100644
--- a/paddle/fluid/imperative/gloo_context.cc
+++ b/paddle/fluid/imperative/gloo_context.cc
@@ -79,11 +79,11 @@ void GLOOParallelContext::AllReduceByStream(const framework::Variable &src,
                                             int ring_id,
                                             bool use_calc_stream) {
   // AllReduce(src, dst, strategy_, ring_id, use_calc_stream);
-  if (src.IsType<phi::DenseTensor>()) {
-    if (!dst->IsType<phi::DenseTensor>()) {
+  if (src.IsType<DenseTensor>()) {
+    if (!dst->IsType<DenseTensor>()) {
       dst->Clear();
     }
-    AllReduce(src.Get<phi::DenseTensor>(), dst->GetMutable<phi::DenseTensor>());
+    AllReduce(src.Get<DenseTensor>(), dst->GetMutable<DenseTensor>());
   } else if (src.IsType<phi::SelectedRows>()) {
     if (&src != dst) {
       if (!dst->IsType<phi::SelectedRows>()) {
@@ -106,8 +106,8 @@ void GLOOParallelContext::AllReduceByStream(const framework::Variable &src,
   }
 }
 
-void GLOOParallelContext::AllReduce(const phi::DenseTensor &src_tensor,
-                                    phi::DenseTensor *dst_tensor) {
+void GLOOParallelContext::AllReduce(const DenseTensor &src_tensor,
+                                    DenseTensor *dst_tensor) {
   auto gloo_wrapper = framework::GlooWrapper::GetInstance();
   dst_tensor->Resize(src_tensor.dims());
   switch (framework::TransToProtoVarType(src_tensor.dtype())) {
diff --git a/paddle/fluid/imperative/gloo_context.h b/paddle/fluid/imperative/gloo_context.h
index 65ed63c1ee6f81..5efd0760e3b1b7 100644
--- a/paddle/fluid/imperative/gloo_context.h
+++ b/paddle/fluid/imperative/gloo_context.h
@@ -60,7 +60,7 @@ class GLOOParallelContext : public ParallelContext {
   void SynchronizeCompute() override;
 
  private:
-  void AllReduce(const phi::DenseTensor& src, phi::DenseTensor* dst);
+  void AllReduce(const DenseTensor& src, DenseTensor* dst);
   void AllReduce(const phi::SelectedRows& src, phi::SelectedRows* dst);
 
  private:
diff --git a/paddle/fluid/imperative/gradient_accumulator.cc b/paddle/fluid/imperative/gradient_accumulator.cc
index 638b7341702faf..bebbb807d632be 100644
--- a/paddle/fluid/imperative/gradient_accumulator.cc
+++ b/paddle/fluid/imperative/gradient_accumulator.cc
@@ -52,12 +52,12 @@ static void MoveOrCopyVar(framework::Variable* dst,
   }
 
   VLOG(6) << "Copy occurs when sum gradients within this graph";
-  if (src->IsType<phi::DenseTensor>()) {
-    auto& src_tensor = src->Get<phi::DenseTensor>();
-    if (!dst->IsType<phi::DenseTensor>()) {
+  if (src->IsType<DenseTensor>()) {
+    auto& src_tensor = src->Get<DenseTensor>();
+    if (!dst->IsType<DenseTensor>()) {
       dst->Clear();
     }
-    auto* dst_tensor = dst->GetMutable<phi::DenseTensor>();
+    auto* dst_tensor = dst->GetMutable<DenseTensor>();
     framework::TensorCopy(src_tensor, src_tensor.place(), dst_tensor);
     dst_tensor->set_lod(src_tensor.lod());
   } else if (src->IsType<phi::SelectedRows>()) {
@@ -126,11 +126,11 @@ TType* GetEmptyInnerTensor(paddle::imperative::VariableWrapper* dst) {
 
 template <typename VarType>
 void TensorAdd(const VarType& src, VarType* dst) {
-  phi::DenseTensor* dst_tensor = GetInnerMutableTensor<phi::DenseTensor>(dst);
-  const phi::DenseTensor& src_tensor = GetInnerTensor<phi::DenseTensor>(src);
+  DenseTensor* dst_tensor = GetInnerMutableTensor<DenseTensor>(dst);
+  const DenseTensor& src_tensor = GetInnerTensor<DenseTensor>(src);
 
   paddle::experimental::CheckAndTrans2Contiguous(
-      const_cast<phi::DenseTensor*>(&src_tensor));
+      const_cast<DenseTensor*>(&src_tensor));
   paddle::experimental::CheckAndTrans2Contiguous(dst_tensor);
 
   auto numel = src_tensor.numel();
@@ -253,7 +253,7 @@ template PADDLE_API void TensorAdd<paddle::Tensor>(const paddle::Tensor& src,
 
 template <typename VarType>
 void SelectedRowsAddToTensor(const VarType& src, VarType* dst) {
-  phi::DenseTensor* dst_tensor = GetInnerMutableTensor<phi::DenseTensor>(dst);
+  DenseTensor* dst_tensor = GetInnerMutableTensor<DenseTensor>(dst);
   const phi::SelectedRows& src_selected_rows =
       GetInnerTensor<phi::SelectedRows>(src);
@@ -306,8 +306,7 @@ void SelectedRowsAddTensor(const VarType& src_selected_rows_var,
                            VarType* dst_tensor_var) {
   const phi::SelectedRows& src_selected_rows =
       GetInnerTensor<phi::SelectedRows>(src_selected_rows_var);
-  const phi::DenseTensor& src_tensor =
-      GetInnerTensor<phi::DenseTensor>(src_tensor_var);
+  const DenseTensor& src_tensor = GetInnerTensor<DenseTensor>(src_tensor_var);
 
   paddle::experimental::CheckAndTrans2Contiguous(
       const_cast<phi::SelectedRows*>(&src_selected_rows)->mutable_value());
@@ -316,8 +315,7 @@ void SelectedRowsAddTensor(const VarType& src_selected_rows_var,
 
   auto data_type = framework::TransToProtoVarType(src_tensor.dtype());
   auto* dev_ctx = phi::DeviceContextPool::Instance().Get(place);
-  phi::DenseTensor* dst_tensor =
-      GetInnerMutableTensor<phi::DenseTensor>(dst_tensor_var);
+  DenseTensor* dst_tensor = GetInnerMutableTensor<DenseTensor>(dst_tensor_var);
   dst_tensor->Resize(src_tensor.dims());
   dst_tensor->mutable_data(place, src_tensor.dtype());
@@ -434,8 +432,8 @@ void VariableWrapperAdd(std::shared_ptr<VariableWrapper> var,
                         bool unchange_input) {
   auto& src = var->Var();
   auto* dst = dst_var->MutableVar();
-  if (dst->IsType<phi::DenseTensor>()) {
-    if (src.IsType<phi::DenseTensor>()) {
+  if (dst->IsType<DenseTensor>()) {
+    if (src.IsType<DenseTensor>()) {
       TensorAdd<framework::Variable>(src, dst);
     } else if (src.IsType<phi::SelectedRows>()) {
       SelectedRowsAddToTensor(src, dst);
@@ -445,7 +443,7 @@ void VariableWrapperAdd(std::shared_ptr<VariableWrapper> var,
                         framework::ToTypeName(dst->Type())));
     }
   } else {
-    if (src.IsType<phi::DenseTensor>()) {
+    if (src.IsType<DenseTensor>()) {
       if (unchange_input) {
         framework::Variable new_dst;
         SelectedRowsAddTensor(*dst, src, &new_dst);
@@ -468,8 +466,8 @@ void VariableWrapperAdd(std::shared_ptr<VariableWrapper> var,
 
 static phi::Place GetPlaceOfVar(const std::shared_ptr<VariableWrapper>& var) {
   phi::Place place;
-  if (var->Var().IsType<phi::DenseTensor>()) {  // NOLINT
-    place = var->Var().Get<phi::DenseTensor>().place();
+  if (var->Var().IsType<DenseTensor>()) {  // NOLINT
+    place = var->Var().Get<DenseTensor>().place();
   } else if (var->Var().IsType<phi::SelectedRows>()) {
     place = var->Var().Get<phi::SelectedRows>().place();
   } else {
@@ -502,14 +500,14 @@ void GradientAccumulator::AccumulateGrad() {
     VLOG(6) << "Leaf Var(" << var_->Name()
             << ")'s Gradient has been initialized, will accumulate on "
                "previous gradient.";
-    if (dst->IsType<phi::DenseTensor>()) {
-      if (src->IsType<phi::DenseTensor>()) {
+    if (dst->IsType<DenseTensor>()) {
+      if (src->IsType<DenseTensor>()) {
         TensorAdd<framework::Variable>(*src, dst);
       } else if (src->IsType<phi::SelectedRows>()) {
         SelectedRowsAddToTensor(*src, dst);
       }
     } else if (dst->IsType<phi::SelectedRows>()) {
-      if (src->IsType<phi::DenseTensor>()) {
+      if (src->IsType<DenseTensor>()) {
         SelectedRowsAddToTensor(*dst, src);
         *dst = std::move(*src);
       } else if (src->IsType<phi::SelectedRows>()) {
@@ -617,18 +615,18 @@ void EagerGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
     }
   } else {
     if (!dst_var->Var().IsInitialized() ||
-        !dst_var->Var().Get<phi::DenseTensor>().IsInitialized()) {
+        !dst_var->Var().Get<DenseTensor>().IsInitialized()) {
       VLOG(6) << "Set StopGradient Grad: " << dst_var->Name() << " as zero ";
       auto* dev_ctx = phi::DeviceContextPool::Instance().Get(place);
       if (!dst_var->Var().IsInitialized()) {
-        auto* tensor = dst_var->MutableVar()->GetMutable<phi::DenseTensor>();
+        auto* tensor = dst_var->MutableVar()->GetMutable<DenseTensor>();
         VLOG(6) << "Dims of " << dst_var->Name()
-                << " is set as: " << var->Var().Get<phi::DenseTensor>().dims();
-        tensor->Resize(var->Var().Get<phi::DenseTensor>().dims());
+                << " is set as: " << var->Var().Get<DenseTensor>().dims();
+        tensor->Resize(var->Var().Get<DenseTensor>().dims());
         tensor->mutable_data(place, phi::TransToPhiDataType(var->DataType()));
         phi::funcs::set_constant(*dev_ctx, tensor, 0.0f);
       } else {
-        auto* tensor = dst_var->MutableVar()->GetMutable<phi::DenseTensor>();
+        auto* tensor = dst_var->MutableVar()->GetMutable<DenseTensor>();
         tensor->mutable_data(place, phi::TransToPhiDataType(var->DataType()));
         phi::funcs::set_constant(*dev_ctx, tensor, 0.0f);
       }
@@ -637,7 +635,7 @@ void EagerGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
 
   // Type may be changed after OP run, such as VarTypeInference
   // so synchronous VariableWrapper with Variable.
-  if (dst_var->Var().IsType<phi::DenseTensor>()) {
+  if (dst_var->Var().IsType<DenseTensor>()) {
     dst_var->SetType(framework::proto::VarType::DENSE_TENSOR);
   } else if (dst_var->Var().IsType<phi::SelectedRows>()) {
     dst_var->SetType(framework::proto::VarType::SELECTED_ROWS);
@@ -708,7 +706,7 @@ void SortedGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
         continue;
       }
 
-      PADDLE_ENFORCE_EQ(var_info.var->Var().IsType<phi::DenseTensor>(),
+      PADDLE_ENFORCE_EQ(var_info.var->Var().IsType<DenseTensor>(),
                         true,
                         common::errors::PermissionDenied(
                             "Gradient var must be DenseTensor"));
@@ -731,7 +729,7 @@ void SortedGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
         continue;
       }
       PADDLE_ENFORCE_EQ(
-          var_info.var->Var().IsType<phi::DenseTensor>() ||
+          var_info.var->Var().IsType<DenseTensor>() ||
               var_info.var->Var().IsType<phi::SelectedRows>(),
           true,
          common::errors::PermissionDenied("The type of Gradient "
@@ -755,18 +753,18 @@ void SortedGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
      }
    }
  } else {
    if (!dst_var->Var().IsInitialized() ||
-        !dst_var->Var().Get<phi::DenseTensor>().IsInitialized()) {
+        !dst_var->Var().Get<DenseTensor>().IsInitialized()) {
      VLOG(6) << "Set StopGradient Grad: " << var->Name() << " as zero";
      auto* dev_ctx = phi::DeviceContextPool::Instance().Get(place);
      if (!dst_var->Var().IsInitialized()) {
-        auto* tensor = dst_var->MutableVar()->GetMutable<phi::DenseTensor>();
+        auto* tensor = dst_var->MutableVar()->GetMutable<DenseTensor>();
        VLOG(6) << "Dims of " << dst_var->Name()
-                << " is set as: " << var->Var().Get<phi::DenseTensor>().dims();
-        tensor->Resize(var->Var().Get<phi::DenseTensor>().dims());
+                << " is set as: " << var->Var().Get<DenseTensor>().dims();
+        tensor->Resize(var->Var().Get<DenseTensor>().dims());
        tensor->mutable_data(place, phi::TransToPhiDataType(var->DataType()));
        phi::funcs::set_constant(*dev_ctx, tensor, 0.0f);
      } else {
-        auto* tensor = dst_var->MutableVar()->GetMutable<phi::DenseTensor>();
+        auto* tensor = dst_var->MutableVar()->GetMutable<DenseTensor>();
        tensor->mutable_data(place, phi::TransToPhiDataType(var->DataType()));
        phi::funcs::set_constant(*dev_ctx, tensor, 0.0f);
      }
@@ -775,7 +773,7 @@ void SortedGradientAccumulator::SumGrad(std::shared_ptr<VariableWrapper> var,
    tmp_grad_vars_.clear();
  }
 
-  if (dst_var->Var().IsType<phi::DenseTensor>()) {
+  if (dst_var->Var().IsType<DenseTensor>()) {
    dst_var->SetType(framework::proto::VarType::DENSE_TENSOR);
  } else if (dst_var->Var().IsType<phi::SelectedRows>()) {
    dst_var->SetType(framework::proto::VarType::SELECTED_ROWS);
diff --git a/paddle/fluid/imperative/gradient_accumulator.h b/paddle/fluid/imperative/gradient_accumulator.h
index b8e21e18c58714..9c359b2e8010bc 100644
--- a/paddle/fluid/imperative/gradient_accumulator.h
+++ b/paddle/fluid/imperative/gradient_accumulator.h
@@ -31,7 +31,7 @@ class GradientAccumulator {
  explicit GradientAccumulator(VariableWrapper* var) {
    // var may be initialized, so Synchronous VariableWrapper with Variable
    if (var && var->Var().IsInitialized()) {
-      if (var->Var().IsType<phi::DenseTensor>()) {
+      if (var->Var().IsType<DenseTensor>()) {
        var->SetType(framework::proto::VarType::DENSE_TENSOR);
      } else if (var->Var().IsType<phi::SelectedRows>()) {
        var->SetType(framework::proto::VarType::SELECTED_ROWS);
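The accumulator paths above all drive `framework::Variable`'s type-erased interface: `IsType<T>()` tests the held type, `Get<T>()`/`GetMutable<T>()` access it, and `Clear()` resets the holder — which is why `MoveOrCopyVar` calls `dst->Clear()` before `dst->GetMutable<DenseTensor>()` whenever `dst` currently holds a different type. A toy model of those assumed semantics (simplified illustration, not the real implementation):

#include <cassert>
#include <memory>
#include <typeindex>

class Variable {
 public:
  template <typename T>
  bool IsType() const {
    return holder_ && holder_->type() == std::type_index(typeid(T));
  }

  template <typename T>
  const T& Get() const {
    assert(IsType<T>());
    return *static_cast<const T*>(holder_->ptr());
  }

  // Default-constructs T on first use; switching the held type without
  // calling Clear() first is an error, mirroring the guards in the diff.
  template <typename T>
  T* GetMutable() {
    if (!holder_) holder_ = std::make_unique<Holder<T>>();
    assert(IsType<T>());
    return static_cast<T*>(holder_->ptr());
  }

  void Clear() { holder_.reset(); }
  bool IsInitialized() const { return holder_ != nullptr; }

 private:
  struct HolderBase {
    virtual ~HolderBase() = default;
    virtual void* ptr() = 0;
    virtual std::type_index type() const = 0;
  };
  template <typename T>
  struct Holder : HolderBase {
    void* ptr() override { return &obj; }
    std::type_index type() const override { return typeid(T); }
    T obj{};
  };
  std::unique_ptr<HolderBase> holder_;
};

int main() {
  Variable v;
  *v.GetMutable<int>() = 7;  // first use installs int
  assert(v.IsType<int>() && v.Get<int>() == 7);
  v.Clear();                 // required before switching the held type
  *v.GetMutable<double>() = 1.5;
  return 0;
}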
diff --git a/paddle/fluid/imperative/heter_ccl_context.cc b/paddle/fluid/imperative/heter_ccl_context.cc
index fdfde1d30efd59..ac28baf035966b 100644
--- a/paddle/fluid/imperative/heter_ccl_context.cc
+++ b/paddle/fluid/imperative/heter_ccl_context.cc
@@ -146,9 +146,9 @@ void HeterParallelContext::AllReduceByStream(const framework::Variable &src,
   if (inter_parallel_ctx_ != nullptr) {
     // copy src to cpu
     // dst is now the src
-    auto src_tensor = dst->Get<phi::DenseTensor>();
+    auto src_tensor = dst->Get<DenseTensor>();
     framework::Variable src_cpu;
-    auto src_cpu_tensor = src_cpu.GetMutable<phi::DenseTensor>();
+    auto src_cpu_tensor = src_cpu.GetMutable<DenseTensor>();
     framework::TensorCopySync(src_tensor, CPUPlace(), src_cpu_tensor);
 
     // allreduce src/cpu to dst/cpu
@@ -157,8 +157,8 @@ void HeterParallelContext::AllReduceByStream(const framework::Variable &src,
     inter_parallel_ctx_->WaitComm(ring_id);
 
     // copy dst/cpu to dst
-    auto dst_cpu_tensor = dst_cpu.Get<phi::DenseTensor>();
-    auto dst_tensor = dst->GetMutable<phi::DenseTensor>();
+    auto dst_cpu_tensor = dst_cpu.Get<DenseTensor>();
+    auto dst_tensor = dst->GetMutable<DenseTensor>();
     framework::TensorCopySync(dst_cpu_tensor, dst_tensor->place(), dst_tensor);
 
     inter_parallel_ctx_->WaitComm(ring_id);
diff --git a/paddle/fluid/imperative/infer_shape_context.h b/paddle/fluid/imperative/infer_shape_context.h
index bb562e1350fb01..e214aeb99c46f0 100644
--- a/paddle/fluid/imperative/infer_shape_context.h
+++ b/paddle/fluid/imperative/infer_shape_context.h
@@ -224,9 +224,9 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
         common::errors::PreconditionNotMet(
             "The type of %s and %s is not the same.", in, out));
 
-    if (in_var->IsType<phi::DenseTensor>()) {
-      auto& in_lod_tensor = in_var->Get<phi::DenseTensor>();
-      auto* out_lod_tensor = out_var->GetMutable<phi::DenseTensor>();
+    if (in_var->IsType<DenseTensor>()) {
+      auto& in_lod_tensor = in_var->Get<DenseTensor>();
+      auto* out_lod_tensor = out_var->GetMutable<DenseTensor>();
       out_lod_tensor->Resize(in_lod_tensor.dims());
     } else {
       auto& in_sele_rows = in_var->Get<phi::SelectedRows>();
@@ -441,8 +441,8 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
     PADDLE_ENFORCE_NOT_NULL(var,
                             common::errors::PreconditionNotMet(
                                 "Input variable should not be null"));
-    if (var->IsType<phi::DenseTensor>()) {
-      return var->Get<phi::DenseTensor>().dims();
+    if (var->IsType<DenseTensor>()) {
+      return var->Get<DenseTensor>().dims();
     } else if (var->IsType<phi::SelectedRows>()) {
       return var->Get<phi::SelectedRows>().GetCompleteDims();
     } else {
@@ -460,8 +460,8 @@ class DygraphInferShapeContext : public framework::InferShapeContext {
   }
 
   void SetDim(framework::Variable* var, const DDim& dim) {
-    if (var->IsType<phi::DenseTensor>()) {
-      var->GetMutable<phi::DenseTensor>()->Resize(dim);
+    if (var->IsType<DenseTensor>()) {
+      var->GetMutable<DenseTensor>()->Resize(dim);
     } else if (var->IsType<phi::SelectedRows>()) {
       var->GetMutable<phi::SelectedRows>()->set_height(dim[0]);
     } else {
diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index c663799cc4595d..0d8c3ba71c97cf 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -96,8 +96,8 @@ static std::string DebugString(
       const framework::Variable& var = vars[i]->Var();
       if (!var.IsInitialized()) {
         ss << "NOT_INITED_VAR";
-      } else if (var.IsType<phi::DenseTensor>()) {
-        auto& tensor = var.Get<phi::DenseTensor>();
+      } else if (var.IsType<DenseTensor>()) {
+        auto& tensor = var.Get<DenseTensor>();
         ss << "DenseTensor<";
         if (tensor.IsInitialized()) {
           ss << framework::DataTypeToString(
@@ -236,7 +236,7 @@ void VarBase::ClearGradient(bool set_to_zero) {
   } else {
     phi::RecordEvent record_event(
         "ClearGradient", phi::TracerEventType::UserDefined, 2);
-    auto* grad_t = grad_var_->MutableVar()->GetMutable<phi::DenseTensor>();
+    auto* grad_t = grad_var_->MutableVar()->GetMutable<DenseTensor>();
     if (grad_t->IsInitialized()) {
       if (set_to_zero) {
         auto* dev_ctx =
@@ -283,20 +283,20 @@ bool VarBase::_IsGradientSetEmpty() {
 std::shared_ptr<VarBase> VarBase::NewVarBase(const phi::Place& dst_place,
                                              const bool blocking) const {
   PADDLE_ENFORCE_EQ(
-      Var().IsInitialized() && (Var().IsType<phi::DenseTensor>() ||
-                                Var().IsType<phi::SelectedRows>()),
+      Var().IsInitialized() &&
+          (Var().IsType<DenseTensor>() || Var().IsType<phi::SelectedRows>()),
       true,
       common::errors::InvalidArgument(
           "Variable is not initialized or Variable's type is not "
           "DenseTensor or SelectedRows when getting numpy tensor"));
 
-  if (Var().IsType<phi::DenseTensor>()) {
-    auto& src_tensor = Var().Get<phi::DenseTensor>();
+  if (Var().IsType<DenseTensor>()) {
+    auto& src_tensor = Var().Get<DenseTensor>();
     // TODO(Jiabin): change this after move unique_name generator to CXX
     auto new_var = std::make_shared<VarBase>(
         true, Name() + std::to_string(copied_counter_++));
 
-    auto* dst_tensor = new_var->MutableVar()->GetMutable<phi::DenseTensor>();
+    auto* dst_tensor = new_var->MutableVar()->GetMutable<DenseTensor>();
     dst_tensor->set_lod(src_tensor.lod());
     new_var->SetPersistable(Persistable());
     new_var->SetDataType(DataType());
@@ -367,9 +367,9 @@ void VarBase::CopyFrom(const VarBase& src, const bool blocking) {
   }
 
   phi::Place place = src.Place();
-  if (src.Var().IsType<phi::DenseTensor>()) {
-    auto& src_tensor = src.Var().Get<phi::DenseTensor>();
-    auto* dst_tensor = MutableVar()->GetMutable<phi::DenseTensor>();
+  if (src.Var().IsType<DenseTensor>()) {
+    auto& src_tensor = src.Var().Get<DenseTensor>();
+    auto* dst_tensor = MutableVar()->GetMutable<DenseTensor>();
     if (dst_tensor && dst_tensor->IsInitialized()) {
       PADDLE_ENFORCE_EQ(dst_tensor->dims(),
                         src_tensor.dims(),
@@ -449,13 +449,13 @@ void VarBase::_CopyGradientFrom(const VarBase& src) {
   }
   VLOG(4) << " VarBase copy gradient with " << src.Name();
   if (grad_var_) {
-    auto& src_tensor = src.Var().Get<phi::DenseTensor>();
+    auto& src_tensor = src.Var().Get<DenseTensor>();
     PADDLE_ENFORCE_EQ(src_tensor.IsInitialized(),
                       true,
                       common::errors::InvalidArgument(
                           "Tensor %s has not been initialized", src.Name()));
-    auto* grad_t = grad_var_->MutableVar()->GetMutable<phi::DenseTensor>();
-    auto* var_ = MutableVar()->GetMutable<phi::DenseTensor>();
+    auto* grad_t = grad_var_->MutableVar()->GetMutable<DenseTensor>();
+    auto* var_ = MutableVar()->GetMutable<DenseTensor>();
     grad_t->ShareDataWith(src_tensor);
     grad_t->Resize(var_->dims());
   }
@@ -584,13 +584,13 @@ void ClearNoNeedBufferInputs(OpBase* op) {
       if (!each_var) continue;
       auto& var = each_var->Var();
-      PADDLE_ENFORCE_EQ(var.IsType<phi::DenseTensor>(),
+      PADDLE_ENFORCE_EQ(var.IsType<DenseTensor>(),
                         true,
                         common::errors::PermissionDenied(
                             "NoNeedBufferVars only support DenseTensor"));
       auto new_var = new VariableWrapper(each_var->Name());
-      auto* new_tensor = new_var->MutableVar()->GetMutable<phi::DenseTensor>();
-      auto& old_tensor = var.Get<phi::DenseTensor>();
+      auto* new_tensor = new_var->MutableVar()->GetMutable<DenseTensor>();
+      auto& old_tensor = var.Get<DenseTensor>();
       new_tensor->Resize(old_tensor.dims());
       new_tensor->set_lod(old_tensor.lod());
       new_tensor->set_type(old_tensor.dtype());
diff --git a/paddle/fluid/imperative/nccl_context.cc b/paddle/fluid/imperative/nccl_context.cc
index 58b946797cdff4..dd502a7d803d99 100644
--- a/paddle/fluid/imperative/nccl_context.cc
+++ b/paddle/fluid/imperative/nccl_context.cc
@@ -140,7 +140,7 @@ void NCCLParallelContext::AllReduceByStream(const framework::Variable &src,
 
 void NCCLParallelContext::Broadcast(framework::Variable *src, int ring_id) {
   VLOG(3) << "/// DEBUG /// start inter broadcast with ring_id: " << ring_id;
-  phi::DenseTensor *src_tensor = src->GetMutable<phi::DenseTensor>();
+  DenseTensor *src_tensor = src->GetMutable<DenseTensor>();
   const auto &place = src_tensor->place();
   platform::NCCLComm *comm =
       platform::NCCLCommContext::Instance().Get(ring_id, place);
diff --git a/paddle/fluid/imperative/partial_grad_engine.cc b/paddle/fluid/imperative/partial_grad_engine.cc
index ddce90c7d58970..be21554e311156 100644
--- a/paddle/fluid/imperative/partial_grad_engine.cc
+++ b/paddle/fluid/imperative/partial_grad_engine.cc
@@ -320,8 +320,8 @@ static void FillConstantLike(const VariableWrapper &ref_var,
                              VariableWrapper *dst_var,
                              const phi::Place &place,
                              float value) {
-  auto &ref_tensor = ref_var.Var().Get<phi::DenseTensor>();
-  auto *dst_tensor = dst_var->MutableVar()->GetMutable<phi::DenseTensor>();
+  auto &ref_tensor = ref_var.Var().Get<DenseTensor>();
+  auto *dst_tensor = dst_var->MutableVar()->GetMutable<DenseTensor>();
   auto *dev_ctx = phi::DeviceContextPool::Instance().Get(place);
   dst_tensor->Resize(ref_tensor.dims());
   // TODO(jiabin): Ugly fix here we have fwd_data_type_ and data_type, since in
@@ -799,8 +799,8 @@ PartialGradTask::PartialGradTask(
       } else {
         VLOG(10) << "Use user provided grad var for "
                  << output_targets[i]->Name();
-        const auto &out_tensor = output_targets[i]->Var().Get<phi::DenseTensor>();
-        const auto &grad_tensor = output_grads[i]->Var().Get<phi::DenseTensor>();
+        const auto &out_tensor = output_targets[i]->Var().Get<DenseTensor>();
+        const auto &grad_tensor = output_grads[i]->Var().Get<DenseTensor>();
         PADDLE_ENFORCE_EQ(
             grad_tensor.dims(),
             out_tensor.dims(),
diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc
index 1192d9d49ecf74..1d6f2684ca2e07 100644
--- a/paddle/fluid/imperative/prepared_operator.cc
+++ b/paddle/fluid/imperative/prepared_operator.cc
@@ -70,9 +70,9 @@ const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
   return var;
 }
 
-const phi::DenseTensor* GetTensorFromVar(const framework::Variable& var) {
-  if (var.IsType<phi::DenseTensor>()) {
-    return &(var.Get<phi::DenseTensor>());
+const DenseTensor* GetTensorFromVar(const framework::Variable& var) {
+  if (var.IsType<DenseTensor>()) {
+    return &(var.Get<DenseTensor>());
   } else if (var.IsType<phi::SelectedRows>()) {
     return &(var.Get<phi::SelectedRows>().value());
   } else {
@@ -103,7 +103,7 @@ void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
               << " var `" << var->Name() << "` to "
               << framework::DataTypeToString(var->ForwardDataType())
               << " real var in dynamic graph.";
-      phi::DenseTensor out;
+      DenseTensor out;
       framework::TransComplexToReal(
           var->ForwardDataType(), var->DataType(), *tensor, &out);
       SetTensorToVariable(var->Var(), out, var->MutableVar());
diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h
index 1f9f3d2cdae45f..292434d16ba5bd 100644
--- a/paddle/fluid/imperative/prepared_operator.h
+++ b/paddle/fluid/imperative/prepared_operator.h
@@ -47,8 +47,7 @@ PADDLE_API void TestHandleComplexGradToRealGradEager(
     const NameVarMap<egr::EagerVariable>& outs);
 #endif
 
-PADDLE_API const phi::DenseTensor* GetTensorFromVar(
-    const framework::Variable& var);
+PADDLE_API const DenseTensor* GetTensorFromVar(const framework::Variable& var);
 
 template <typename VarType>
 static void SetForwardDataTypeOfGradVar(const std::shared_ptr<VarType>& var);
@@ -122,7 +121,7 @@ std::shared_ptr<NameVarMap<VarType>> PrepareData(
             cache_var->Var(), *tensor, tmp_var->MutableVar());
         (*tmp_ins_ptr)[name_pair.first][i] = tmp_var;
       } else {
-        phi::DenseTensor out;
+        DenseTensor out;
         framework::TransformData(
             expected_kernel_key, kernel_type_for_var, *tensor, &out, place);
         if (framework::NeedTransformDataType(kernel_type_for_var,
@@ -309,7 +308,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature,
 
     if (it == ins.end()) {
       if (LIKELY(input_defs[i].type_index ==
-                 std::type_index(typeid(paddle::optional<phi::DenseTensor>)))) {
+                 std::type_index(typeid(paddle::optional<DenseTensor>)))) {
         kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr);
         auto end_idx = start_idx + 1;
         kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
@@ -320,9 +319,8 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature,
                  input_defs[i].type_index ==
                      std::type_index(typeid(paddle::optional<phi::SelectedRows>)) ||
                  input_defs[i].type_index ==
-                     std::type_index(
-                         typeid(paddle::optional<
-                                std::vector<const phi::DenseTensor*>>))) {
+                     std::type_index(typeid(
+                         paddle::optional<std::vector<const DenseTensor*>>))) {
         kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr);
         auto end_idx = start_idx + 1;
         kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
@@ -343,8 +341,8 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature,
     for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
       const phi::TensorBase* tensor_in = nullptr;
       auto& var = ins_vector[offset]->Var();
-      if (var.template IsType<phi::DenseTensor>()) {
-        tensor_in = &(var.template Get<phi::DenseTensor>());
+      if (var.template IsType<DenseTensor>()) {
+        tensor_in = &(var.template Get<DenseTensor>());
         kernel_ctx->EmplaceBackInputWithoutSetRange(tensor_in);
       } else if (var.template IsType<phi::SelectedRows>()) {
         tensor_in = &(var.template Get<phi::SelectedRows>());
@@ -391,8 +389,8 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature,
       phi::TensorBase* tensor_out = nullptr;
       auto* var = outs_vector[offset]->MutableVar();
       if (var) {
-        if (var->template IsType<phi::DenseTensor>()) {
-          tensor_out = var->template GetMutable<phi::DenseTensor>();
+        if (var->template IsType<DenseTensor>()) {
+          tensor_out = var->template GetMutable<DenseTensor>();
           kernel_ctx->EmplaceBackOutputWithoutSetRange(tensor_out);
         } else if (var->template IsType<phi::SelectedRows>()) {
           tensor_out = var->template GetMutable<phi::SelectedRows>();
@@ -696,7 +694,7 @@ void PreparePhiData(const phi::Kernel& phi_kernel,
           VLOG(3) << "Phi Transform Variable " << input_names[i] << " from "
                   << tensor_in->place() << " to " << expected_place;
 
-          phi::DenseTensor tmp_tensor;
+          DenseTensor tmp_tensor;
           framework::TensorCopySync(*tensor_in, expected_place, &tmp_tensor);
 
           SetTensorToVariable(var->Var(), tmp_tensor, var->MutableVar());
diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc
index c21e85ad14d5f3..a7ef7b572fb641 100644
--- a/paddle/fluid/imperative/reducer.cc
+++ b/paddle/fluid/imperative/reducer.cc
@@ -34,10 +34,10 @@ namespace imperative {
     defined(PADDLE_WITH_CUSTOM_DEVICE)
 // div the nranks
 void Group::DivNRanks(const phi::DeviceContext &context, int64_t nranks) {
-  phi::DenseTensor *tensor =
+  DenseTensor *tensor =
       is_sparse_
           ? sparse_contents_->GetMutable<phi::SelectedRows>()->mutable_value()
-          : dense_contents_.GetMutable<phi::DenseTensor>();
+          : dense_contents_.GetMutable<DenseTensor>();
 
   if (phi::is_gpu_place(tensor->place())) {
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
@@ -72,23 +72,21 @@ void Group::DivNRanks(const phi::DeviceContext &context, int64_t nranks) {
 
 template <typename DeviceContext, typename T>
 static void ConcatTensorsForAllReduce(
     const DeviceContext &context,
-    const std::vector<phi::DenseTensor> &dense_tensors_,
+    const std::vector<DenseTensor> &dense_tensors_,
     framework::Variable *p_dense_contents) {
   phi::funcs::ConcatFunctor<DeviceContext, T> concat_functor_;
-  concat_functor_(context,
-                  dense_tensors_,
-                  0,
-                  p_dense_contents->GetMutable<phi::DenseTensor>());
+  concat_functor_(
+      context, dense_tensors_, 0, p_dense_contents->GetMutable<DenseTensor>());
 }
 
 template <typename DeviceContext, typename T>
 static void SplitTensorsForAllReduce(
     const DeviceContext &context,
     framework::Variable *p_dense_contents,
-    std::vector<phi::DenseTensor> *p_dense_tensors) {
-  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
-  std::vector<phi::DenseTensor *> outs;
-  std::vector<const phi::DenseTensor *> shape_refer;
+    std::vector<DenseTensor> *p_dense_tensors) {
+  auto *in = p_dense_contents->GetMutable<DenseTensor>();
+  std::vector<DenseTensor *> outs;
+  std::vector<const DenseTensor *> shape_refer;
   outs.reserve(p_dense_tensors->size());
   shape_refer.reserve(p_dense_tensors->size());
@@ -111,7 +109,7 @@ static void SplitTensorsForAllReduce(
 template <typename DeviceContext>
 static void ConcatTensorsWithType(
     const DeviceContext &context,
-    const std::vector<phi::DenseTensor> &dense_tensors_,
+    const std::vector<DenseTensor> &dense_tensors_,
     framework::Variable *p_dense_contents,
     framework::proto::VarType::Type type) {
   switch (type) {
@@ -139,7 +137,7 @@ static void ConcatTensorsWithType(
 template <typename DeviceContext>
 static void SplitTensorsWithType(const DeviceContext &context,
                                  framework::Variable *p_dense_contents,
-                                 std::vector<phi::DenseTensor> *p_dense_tensors,
+                                 std::vector<DenseTensor> *p_dense_tensors,
                                  framework::proto::VarType::Type type) {
   switch (type) {
     case framework::proto::VarType::FP16:
@@ -167,10 +165,10 @@ template <>
 void SplitTensorsForAllReduce<phi::XPUContext, float>(
     const phi::XPUContext &context,
     framework::Variable *p_dense_contents,
-    std::vector<phi::DenseTensor> *p_dense_tensors) {
-  auto *in = p_dense_contents->GetMutable<phi::DenseTensor>();
-  std::vector<phi::DenseTensor *> outs;
-  std::vector<const phi::DenseTensor *> shape_refer;
+    std::vector<DenseTensor> *p_dense_tensors) {
+  auto *in = p_dense_contents->GetMutable<DenseTensor>();
+  std::vector<DenseTensor *> outs;
+  std::vector<const DenseTensor *> shape_refer;
   outs.reserve(p_dense_tensors->size());
   shape_refer.reserve(p_dense_tensors->size());
@@ -187,7 +185,7 @@ void SplitTensorsForAllReduce(
 template <>
 void ConcatTensorsWithType<phi::XPUContext>(
     const phi::XPUContext &context,
-    const std::vector<phi::DenseTensor> &dense_tensors_,
+    const std::vector<DenseTensor> &dense_tensors_,
     framework::Variable *p_dense_contents,
     framework::proto::VarType::Type type) {
   switch (type) {
@@ -208,7 +206,7 @@ template <>
 void SplitTensorsWithType<phi::XPUContext>(
     const phi::XPUContext &context,
     framework::Variable *p_dense_contents,
-    std::vector<phi::DenseTensor> *p_dense_tensors,
+    std::vector<DenseTensor> *p_dense_tensors,
     framework::proto::VarType::Type type) {
   switch (type) {
     case framework::proto::VarType::FP32:
@@ -367,7 +365,7 @@ void Reducer::InitializeDenseGroups(
             "GRAD is SelectedRows",
             var_name));
 
-    auto lod_tensor = var->MutableVar()->GetMutable<phi::DenseTensor>();
+    auto lod_tensor = var->MutableVar()->GetMutable<DenseTensor>();
     PADDLE_ENFORCE_EQ(lod_tensor->IsInitialized(),
                       true,
                       common::errors::PreconditionNotMet(
@@ -739,7 +737,7 @@ void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
 
     if (is_used_var) {
       auto var_base = vars_[var_index]->GradVarBase();
-      auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
+      auto tensor = var_base->MutableVar()->GetMutable<DenseTensor>();
       group_tensor.ShareDataWith(*tensor).Resize(
           {static_cast<int64_t>(length)});
     } else {
@@ -757,7 +755,7 @@ void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
           phi::DeviceContextPool::Instance().Get(place_));
       if (HasGrad(var_index)) {
         auto var_base = vars_[var_index]->GradVarBase();
-        auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
+        auto tensor = var_base->MutableVar()->GetMutable<DenseTensor>();
         group_tensor.ShareDataWith(*tensor).Resize(
             {static_cast<int64_t>(length)});
       } else {
@@ -774,7 +772,7 @@ void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
       auto *dev_ctx = phi::DeviceContextPool::Instance().Get(place_);
       if (HasGrad(var_index)) {
         auto var_base = vars_[var_index]->GradVarBase();
-        auto tensor = var_base->MutableVar()->GetMutable<phi::DenseTensor>();
+        auto tensor = var_base->MutableVar()->GetMutable<DenseTensor>();
         group_tensor.ShareDataWith(*tensor).Resize(
             {static_cast<int64_t>(length)});
       } else {
@@ -848,7 +846,7 @@ void Reducer::MarkGroupReady(size_t group_index) {
     UNUSED auto &group = groups_[next_group_];
     UNUSED const int run_order = next_group_ % nrings_;
 
-    auto *tensor = group.dense_contents_.GetMutable<phi::DenseTensor>();
+    auto *tensor = group.dense_contents_.GetMutable<DenseTensor>();
     tensor->Resize(common::make_ddim({group.all_length_}))
         .mutable_data(place_, phi::TransToPhiDataType(group.dtype_));
@@ -924,7 +922,7 @@ void Reducer::ProcessUnusedDenseVars() {
           << string::join_strings(local_used_vars_, ',');
   const auto *dev_ctx = phi::DeviceContextPool::Instance().Get(place_);
   // H2D is to allreduce the local_used_vars_
-  auto *global_used_tensor = global_used_vars_.GetMutable<phi::DenseTensor>();
+  auto *global_used_tensor = global_used_vars_.GetMutable<DenseTensor>();
   framework::TensorFromVector(
       local_used_vars_, *dev_ctx, global_used_tensor);
   parallel_ctx_->AllReduceByStream(
@@ -961,7 +959,7 @@ void Reducer::ProcessUnusedDenseVars() {
       // 2. destination var base
       auto dest_var_base = vars_[var_index];
       auto *dest_tensor =
-          dest_var_base->MutableVar()->GetMutable<phi::DenseTensor>();
+          dest_var_base->MutableVar()->GetMutable<DenseTensor>();
       const auto &dest_dims = dest_tensor->dims();
 
       // 3. create grad var base or get grad var base
@@ -973,7 +971,7 @@ void Reducer::ProcessUnusedDenseVars() {
 
       // 4. set grad tensor
       auto *dest_grad_tensor =
-          grad_var_base_tmp->MutableVar()->GetMutable<phi::DenseTensor>();
+          grad_var_base_tmp->MutableVar()->GetMutable<DenseTensor>();
       const auto *dev_ctx = phi::DeviceContextPool::Instance().Get(place_);
       paddle::framework::TensorCopy(
           src_tensor, place_, *dev_ctx, dest_grad_tensor);
@@ -989,8 +987,8 @@ bool Reducer::HasGrad(size_t var_index) {
   }
 
   const auto &var = grad_var->Var();
-  if (var.IsType<phi::DenseTensor>()) {
-    if (var.Get<phi::DenseTensor>().IsInitialized()) {
+  if (var.IsType<DenseTensor>()) {
+    if (var.Get<DenseTensor>().IsInitialized()) {
       return true;
     }
   } else if (var.IsType<phi::SelectedRows>()) {
@@ -1108,8 +1106,8 @@ std::vector<std::vector<size_t>> AssignGroupBySize(
             << var->DataType();
     auto &group_info = next_group[var_dtype_str];
     int64_t var_size = -1;
-    if (var->Var().IsType<phi::DenseTensor>()) {
-      var_size = var->Var().Get<phi::DenseTensor>().numel();
+    if (var->Var().IsType<DenseTensor>()) {
+      var_size = var->Var().Get<DenseTensor>().numel();
     } else {
       VLOG(3) << "var " << var->Name()
               << " is not tensor or selected_rows, so skip it";
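For orientation on the `reducer.cc` signatures above: `ConcatTensorsForAllReduce` packs every gradient in a group into the single `dense_contents_` buffer so one collective call covers the whole group, and `SplitTensorsForAllReduce` scatters the reduced result back out; the diff only retypes their `std::vector<DenseTensor>` parameters. A stripped-down sketch of that round trip, using `std::vector<float>` in place of `DenseTensor` (illustrative only, not the real functors):

#include <cstddef>
#include <vector>

// Pack per-variable gradients into one contiguous buffer (the "concat").
std::vector<float> Concat(const std::vector<std::vector<float>>& grads) {
  std::vector<float> fused;
  for (const auto& g : grads) fused.insert(fused.end(), g.begin(), g.end());
  return fused;
}

// Scatter the fused buffer back into the original tensors (the "split").
void Split(const std::vector<float>& fused,
           std::vector<std::vector<float>>* grads) {
  std::size_t offset = 0;
  for (auto& g : *grads) {
    for (auto& v : g) v = fused[offset++];
  }
}

int main() {
  std::vector<std::vector<float>> grads = {{1, 2}, {3, 4, 5}};
  std::vector<float> fused = Concat(grads);  // one buffer -> one all-reduce
  // ... the collective all-reduce over `fused` would happen here ...
  Split(fused, &grads);  // per-variable views restored
  return 0;
}

The real functors additionally dispatch on device context and element type (see `ConcatTensorsWithType`/`SplitTensorsWithType`), which is why only their container signatures change here.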
diff --git a/paddle/fluid/imperative/reducer.cu b/paddle/fluid/imperative/reducer.cu
index 35b23159c4ea84..d74f90ad41e26b 100644
--- a/paddle/fluid/imperative/reducer.cu
+++ b/paddle/fluid/imperative/reducer.cu
@@ -18,7 +18,7 @@ namespace paddle {
 namespace imperative {
 
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
-void Group::DivNRanks(phi::DenseTensor *tensor,
+void Group::DivNRanks(DenseTensor *tensor,
                       int64_t nranks,
                       const phi::DeviceContext &context) {
 #ifdef PADDLE_WITH_HIP
diff --git a/paddle/fluid/imperative/reducer.h b/paddle/fluid/imperative/reducer.h
index f2556ec14e16ca..68028a8cf5b2ba 100644
--- a/paddle/fluid/imperative/reducer.h
+++ b/paddle/fluid/imperative/reducer.h
@@ -61,10 +61,10 @@ struct DivNRanksFunctor {
 
 template <typename Dex>
 struct DivNRanksForAllReduce {
-  phi::DenseTensor* in_;
+  DenseTensor* in_;
   int64_t nranks_;
   const phi::DeviceContext& ctx_;
-  DivNRanksForAllReduce(phi::DenseTensor* in,
+  DivNRanksForAllReduce(DenseTensor* in,
                         int64_t nranks,
                         const phi::DeviceContext& ctx)
       : in_(in), nranks_(nranks), ctx_(ctx) {}
@@ -89,7 +89,7 @@ class Group {
   bool is_sparse_ = false;
 
   // for concat kernel
-  std::vector<phi::DenseTensor> dense_tensors_;
+  std::vector<DenseTensor> dense_tensors_;
 
   std::vector<size_t> length_;
@@ -111,7 +111,7 @@ class Group {
   void SplitTensors(const phi::DeviceContext& context);
 
   // use it in CUDA
-  void DivNRanks(phi::DenseTensor* tensor,
+  void DivNRanks(DenseTensor* tensor,
                  int64_t nranks,
                  const phi::DeviceContext& context);
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index fa808c7cfd1974..23c07692a4677a 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -442,18 +442,18 @@ void Tracer::TraceOp(const std::string& type,
                      const std::map<std::string, std::string>& inplace_map) {
   VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
           << use_default_attr_map;
-  std::map<phi::DenseTensor*, phi::DenseTensor*> need_backup_inputs2outputs;
-  std::map<phi::DenseTensor*, std::shared_ptr<phi::Allocation>>
+  std::map<DenseTensor*, DenseTensor*> need_backup_inputs2outputs;
+  std::map<DenseTensor*, std::shared_ptr<phi::Allocation>>
       need_backup_inputs2holder;
-  std::map<phi::DenseTensor*, phi::DDim> need_backup_inputs2strides;
-  std::map<phi::DenseTensor*, size_t> need_backup_inputs2offset;
+  std::map<DenseTensor*, phi::DDim> need_backup_inputs2strides;
+  std::map<DenseTensor*, size_t> need_backup_inputs2offset;
   if (FLAGS_use_stride_kernel) {
     for (auto& iter : inplace_map) {
       auto inputs_iter = ins.find(iter.first);
       for (size_t i = 0; i < inputs_iter->second.size(); i++) {
         auto var = inputs_iter->second[i]->MutableVar();
-        if (var->IsType<phi::DenseTensor>()) {
-          auto dense_tensor = var->GetMutable<phi::DenseTensor>();
+        if (var->IsType<DenseTensor>()) {
+          auto dense_tensor = var->GetMutable<DenseTensor>();
           if (!dense_tensor->meta().is_contiguous()) {
             NameTensorMap* tmp_out = const_cast<NameTensorMap*>(&outs);
             auto outputs_iter = tmp_out->find(iter.second);
@@ -462,7 +462,7 @@ void Tracer::TraceOp(const std::string& type,
             need_backup_inputs2outputs[dense_tensor] =
                 outputs_iter->second[i]
                     ->MutableVar()
-                    ->GetMutable<phi::DenseTensor>();
+                    ->GetMutable<DenseTensor>();
             need_backup_inputs2holder[dense_tensor] = dense_tensor->Holder();
             need_backup_inputs2strides[dense_tensor] = dense_tensor->strides();
             need_backup_inputs2offset[dense_tensor] = dense_tensor->offset();
@@ -519,15 +519,15 @@ void Tracer::TraceOp(const std::string& type,
                      const std::map<std::string, std::string>& inplace_map) {
   VLOG(6) << "Running On Eager TraceOp(less): ";
 
-  std::map<phi::DenseTensor*, phi::DenseTensor*> need_backup_inputs2outputs;
+  std::map<DenseTensor*, DenseTensor*> need_backup_inputs2outputs;
   if (FLAGS_use_stride_kernel) {
     for (auto& iter : inplace_map) {
       auto inputs_iter = ins.find(iter.first);
       for (size_t i = 0; i < inputs_iter->second.size(); i++) {
         auto var = inputs_iter->second[i]->MutableVar();
-        if (var->IsType<phi::DenseTensor>()) {
-          auto dense_tensor = var->GetMutable<phi::DenseTensor>();
+        if (var->IsType<DenseTensor>()) {
+          auto dense_tensor = var->GetMutable<DenseTensor>();
           if (!dense_tensor->meta().is_contiguous()) {
             NameTensorMap* tmp_out = const_cast<NameTensorMap*>(&outs);
             auto outputs_iter = tmp_out->find(iter.second);
@@ -536,7 +536,7 @@ void Tracer::TraceOp(const std::string& type,
             need_backup_inputs2outputs[dense_tensor] =
                 outputs_iter->second[i]
                     ->MutableVar()
-                    ->GetMutable<phi::DenseTensor>();
+                    ->GetMutable<DenseTensor>();
           }
         }
       }
diff --git a/paddle/fluid/imperative/var_helper.cc b/paddle/fluid/imperative/var_helper.cc
index ffaf4873dc62d2..d90ff79127dfee 100644
--- a/paddle/fluid/imperative/var_helper.cc
+++ b/paddle/fluid/imperative/var_helper.cc
@@ -42,7 +42,7 @@ const std::shared_ptr<VariableWrapper> &GetVariableWrapper(
 void InitializeVariable(paddle::framework::Variable *var,
                         paddle::framework::proto::VarType::Type var_type) {
   if (var_type == paddle::framework::proto::VarType::DENSE_TENSOR) {
-    var->GetMutable<phi::DenseTensor>();
+    var->GetMutable<DenseTensor>();
   } else if (var_type == paddle::framework::proto::VarType::SELECTED_ROWS) {
     var->GetMutable<phi::SelectedRows>();
   } else if (var_type == paddle::framework::proto::VarType::FEED_MINIBATCH) {
@@ -78,8 +78,8 @@ void InitializeVariable(paddle::framework::Variable *var,
 template <typename VarType>
 const phi::Place &GetPlace(const std::shared_ptr<VarType> &var) {
   paddle::framework::Variable variable = var->Var();
-  if (variable.IsType<phi::DenseTensor>()) {
-    return variable.Get<phi::DenseTensor>().place();
+  if (variable.IsType<DenseTensor>()) {
+    return variable.Get<DenseTensor>().place();
   } else if (variable.IsType<phi::SelectedRows>()) {
     return variable.Get<phi::SelectedRows>().place();
   } else {
@@ -122,7 +122,7 @@ PADDLE_API void SetType<egr::EagerVariable>(
     framework::proto::VarType::Type type) {
   switch (type) {
     case paddle::framework::proto::VarType::DENSE_TENSOR: {
-      var->MutableVar()->GetMutable<phi::DenseTensor>();
+      var->MutableVar()->GetMutable<DenseTensor>();
       break;
     }
     case paddle::framework::proto::VarType::SELECTED_ROWS: {
@@ -171,12 +171,11 @@ PADDLE_API framework::proto::VarType::Type GetDataType<egr::EagerVariable>(
   if (var->Var().IsType<phi::SelectedRows>()) {
     return framework::TransToProtoVarType(
         var->Var().Get<phi::SelectedRows>().value().type());
-  } else if (var->Var().IsType<phi::DenseTensor>()) {
-    return framework::TransToProtoVarType(
-        var->Var().Get<phi::DenseTensor>().type());
+  } else if (var->Var().IsType<DenseTensor>()) {
+    return framework::TransToProtoVarType(var->Var().Get<DenseTensor>().type());
   } else {
     PADDLE_THROW(common::errors::PermissionDenied(
-        "We only support phi::SelectedRows and phi::DenseTensor in "
+        "We only support phi::SelectedRows and DenseTensor in "
        "eager mode, but we got %s here, please checkout your var type of "
        "tensor: %s",
        paddle::framework::ToTypeName(framework::ToVarType(var->Var().Type())),
@@ -196,11 +195,11 @@ phi::DataLayout GetDataLayout(std::shared_ptr<VarType> var) {
 template <>
 PADDLE_API phi::DataLayout GetDataLayout<egr::EagerVariable>(
     std::shared_ptr<egr::EagerVariable> var) {
-  if (var->Var().IsType<phi::DenseTensor>()) {
-    return var->Var().Get<phi::DenseTensor>().layout();
+  if (var->Var().IsType<DenseTensor>()) {
+    return var->Var().Get<DenseTensor>().layout();
   } else {
     PADDLE_THROW(common::errors::PermissionDenied(
-        "Only support phi::DenseTensor, but got %s here, please checkout "
+        "Only support DenseTensor, but got %s here, please checkout "
         "var type of "
         "tensor: %s",
         paddle::framework::ToTypeName(framework::ToVarType(var->Var().Type())),
@@ -220,11 +219,11 @@ void SetDataLayout(std::shared_ptr<VarType> var, const phi::DataLayout layout)
 template <>
 PADDLE_API void SetDataLayout<egr::EagerVariable>(
     std::shared_ptr<egr::EagerVariable> var, const phi::DataLayout layout) {
-  if (var->Var().IsType<phi::DenseTensor>()) {
-    var->MutableVar()->GetMutable<phi::DenseTensor>()->set_layout(layout);
+  if (var->Var().IsType<DenseTensor>()) {
+    var->MutableVar()->GetMutable<DenseTensor>()->set_layout(layout);
   } else {
     PADDLE_THROW(common::errors::PermissionDenied(
-        "Only support phi::DenseTensor, but got %s here, please checkout "
+        "Only support DenseTensor, but got %s here, please checkout "
        "var type of "
        "tensor: %s",
        paddle::framework::ToTypeName(framework::ToVarType(var->Var().Type())),
diff --git a/paddle/fluid/imperative/variable_wrapper.h b/paddle/fluid/imperative/variable_wrapper.h
index 6e821cf23adcdf..7f1f66562ff738 100644
--- a/paddle/fluid/imperative/variable_wrapper.h
+++ b/paddle/fluid/imperative/variable_wrapper.h
@@ -103,9 +103,9 @@ class VariableWrapper {
   bool IsEmpty() const {
     bool is_empty = true;
     if (var_.IsInitialized()) {
-      const phi::DenseTensor* tensor = nullptr;
-      if (var_.IsType<phi::DenseTensor>()) {
-        tensor = &(var_.Get<phi::DenseTensor>());
+      const DenseTensor* tensor = nullptr;
+      if (var_.IsType<DenseTensor>()) {
+        tensor = &(var_.Get<DenseTensor>());
       } else if (var_.IsType<phi::SelectedRows>()) {
         tensor = &(var_.Get<phi::SelectedRows>().value());
       } else {
@@ -150,10 +150,10 @@ class VariableWrapper {
   }
 
   framework::proto::VarType::Type DataType() const {
-    const phi::DenseTensor* tensor = nullptr;
+    const DenseTensor* tensor = nullptr;
     if (var_.IsInitialized()) {
       if (type_ == framework::proto::VarType::DENSE_TENSOR) {
-        tensor = &(var_.Get<phi::DenseTensor>());
+        tensor = &(var_.Get<DenseTensor>());
       } else if (type_ == framework::proto::VarType::SELECTED_ROWS) {
         tensor = &(var_.Get<phi::SelectedRows>().value());
       } else if (type_ == framework::proto::VarType::VOCAB) {
@@ -192,11 +192,11 @@ class VariableWrapper {
   void SetDataLayout(const phi::DataLayout layout) { layout_ = layout; }
 
   const phi::Place Place() const {
-    const phi::DenseTensor* tensor = nullptr;
+    const DenseTensor* tensor = nullptr;
     auto place = CPUPlace();  // Default place for var not initialized.
     if (var_.IsInitialized()) {
       if (type_ == framework::proto::VarType::DENSE_TENSOR) {
-        tensor = &(var_.Get<phi::DenseTensor>());
+        tensor = &(var_.Get<DenseTensor>());
       } else if (type_ == framework::proto::VarType::SELECTED_ROWS) {
         tensor = &(var_.Get<phi::SelectedRows>().value());
       } else {
diff --git a/paddle/fluid/imperative/xccl_context.cc b/paddle/fluid/imperative/xccl_context.cc
index dde863b43d45f9..86aaa218e8ff5d 100644
--- a/paddle/fluid/imperative/xccl_context.cc
+++ b/paddle/fluid/imperative/xccl_context.cc
@@ -34,8 +34,8 @@ class Variable;
 namespace paddle {
 namespace imperative {
 
-static void XcclAllReduce(const phi::DenseTensor &src,
-                          phi::DenseTensor *dst,
+static void XcclAllReduce(const DenseTensor &src,
+                          DenseTensor *dst,
                           const phi::stream::stream_t &stream,
                           const phi::ccl::CCLComm &comm) {
   const auto &place = src.place();
@@ -173,12 +173,12 @@ void XCCLParallelContext::AllReduceByStream(const framework::Variable &src,
           .Get(ring_id, place);
   auto stream = use_calc_stream ? dev_ctx->stream() : comm->stream();
 
-  if (src.IsType<phi::DenseTensor>()) {
-    if (!dst->IsType<phi::DenseTensor>()) {
+  if (src.IsType<DenseTensor>()) {
+    if (!dst->IsType<DenseTensor>()) {
       dst->Clear();
     }
-    XcclAllReduce(src.Get<phi::DenseTensor>(),
-                  dst->GetMutable<phi::DenseTensor>(),
+    XcclAllReduce(src.Get<DenseTensor>(),
+                  dst->GetMutable<DenseTensor>(),
                   stream,
                   comm->comm());
   } else {
@@ -192,7 +192,7 @@ void XCCLParallelContext::AllReduceByStream(const framework::Variable &src,
 
 void XCCLParallelContext::Broadcast(framework::Variable *src, int ring_id) {
   VLOG(3) << "/// DEBUG /// start inter broadcast with ring_id: " << ring_id;
-  phi::DenseTensor *src_tensor = src->GetMutable<phi::DenseTensor>();
+  DenseTensor *src_tensor = src->GetMutable<DenseTensor>();
   const auto &place = src_tensor->place();
   platform::XCCLComm *comm =
       platform::XCCLCommContext::Instance(place_.GetDeviceType())