2 changes: 1 addition & 1 deletion paddle/fluid/pybind/compiled_program.cc
@@ -874,7 +874,7 @@ void BindCompiledProgram(pybind11::module &m) { // NOLINT
},
py::return_value_policy::reference);
using VarQuantScale =
- std::unordered_map<std::string, std::pair<bool, phi::DenseTensor>>;
+ std::unordered_map<std::string, std::pair<bool, DenseTensor>>;
py::class_<ir::Pass, std::shared_ptr<ir::Pass>> pass(m, "Pass");
pass.def(py::init())
.def("has", &ir::Pass::Has)
10 changes: 5 additions & 5 deletions paddle/fluid/pybind/data_set_py.cc
@@ -80,7 +80,7 @@ class IterableDatasetWrapper {
tensors_.emplace_back();
for (auto &var_name : slots_) {
auto *var = scopes_.back()->Var(var_name);
- auto *t = var->GetMutable<phi::DenseTensor>();
+ auto *t = var->GetMutable<DenseTensor>();
tensors_.back().emplace_back(t);
}
}
@@ -114,15 +114,15 @@ class IterableDatasetWrapper {
exhaustive_num_ = 0;
}

- std::vector<std::unordered_map<std::string, phi::DenseTensor>> Next() {
+ std::vector<std::unordered_map<std::string, DenseTensor>> Next() {
PADDLE_ENFORCE_EQ(
is_started_,
true,
common::errors::PreconditionNotMet(
"Reader must be started when getting next batch data."));
size_t device_num = places_.size();

- std::vector<std::unordered_map<std::string, phi::DenseTensor>> result(
+ std::vector<std::unordered_map<std::string, DenseTensor>> result(
device_num);

size_t read_num = 0;
@@ -176,7 +176,7 @@ class IterableDatasetWrapper {
}

private:
- bool IsValidDenseTensor(const phi::DenseTensor &tensor) const {
+ bool IsValidDenseTensor(const DenseTensor &tensor) const {
if (!drop_last_) return true;
return static_cast<size_t>(tensor.dims()[0]) == batch_size_;
}
@@ -193,7 +193,7 @@
size_t exhaustive_num_;

std::vector<std::unique_ptr<framework::Scope>> scopes_;
- std::vector<std::vector<phi::DenseTensor *>> tensors_;
+ std::vector<std::vector<DenseTensor *>> tensors_;
bool is_started_{false};
};

260 changes: 130 additions & 130 deletions paddle/fluid/pybind/distributed_py.cc

Large diffs are not rendered by default.

37 changes: 18 additions & 19 deletions paddle/fluid/pybind/eager.cc
@@ -103,16 +103,16 @@ void EmptyTensorInitializer(TensorObject* self,
VLOG(6) << "in EmptyTensorInitializer, create DenseTensor";
if (var_type == paddle::framework::proto::VarType::DENSE_TENSOR) {
// TODO(jiabin): Maybe support LegacyLoD later
- std::shared_ptr<phi::DenseTensor> dense_tensor = nullptr;
+ std::shared_ptr<DenseTensor> dense_tensor = nullptr;
if (dims.size() == 1 && dims[0] == 0) {
std::shared_ptr<phi::Allocation> allocation_ptr = nullptr;
- dense_tensor = std::make_shared<phi::DenseTensor>(
+ dense_tensor = std::make_shared<DenseTensor>(
allocation_ptr, phi::DenseTensorMeta(dtype, ddims));
} else {
// TODO(dev): we need enhance check for ddims.
- dense_tensor = std::make_shared<phi::DenseTensor>(
-     std::make_shared<phi::Allocation>(),
-     phi::DenseTensorMeta(dtype, ddims));
+ dense_tensor =
+     std::make_shared<DenseTensor>(std::make_shared<phi::Allocation>(),
+                                   phi::DenseTensorMeta(dtype, ddims));
}
self->tensor.set_impl(dense_tensor);
} else if (var_type == paddle::framework::proto::VarType::SELECTED_ROWS) {
@@ -249,23 +249,23 @@ void InitDistTensorWithTensor(TensorObject* self,
self->tensor.set_name(name);
VLOG(4) << "Do TensorCopy from DenseTensor to DistTensor.";
if (place == src.place()) {
- std::shared_ptr<phi::DenseTensor> tensor =
-     std::static_pointer_cast<phi::DenseTensor>(src.impl());
+ std::shared_ptr<DenseTensor> tensor =
+     std::static_pointer_cast<DenseTensor>(src.impl());
self->tensor.set_impl(
std::make_shared<DistTensor>(tensor, process_mesh, placements));
VLOG(4) << "Same place, do ShareDataWith for DistTensor.";
} else {
- std::shared_ptr<phi::DenseTensor> tensor;
+ std::shared_ptr<DenseTensor> tensor;
if (src.initialized()) {
- tensor = std::static_pointer_cast<phi::DenseTensor>(
+ tensor = std::static_pointer_cast<DenseTensor>(
src.copy_to(place, true).impl());
} else {
// lazy init branch. The src tensor is on undefined place.
PADDLE_ENFORCE(
src.place().GetType() == phi::AllocationType::UNDEFINED,
common::errors::InvalidArgument("Only undefined place is support for "
"uninitialized input tensor."));
- tensor = std::static_pointer_cast<phi::DenseTensor>(src.impl());
+ tensor = std::static_pointer_cast<DenseTensor>(src.impl());
}
self->tensor.set_impl(
std::make_shared<DistTensor>(tensor, process_mesh, placements));
@@ -302,15 +302,14 @@ void InitDistTensorWithTensor(TensorObject* self,
auto global_ddims = common::make_ddim(global_dims);
VLOG(4) << "Do TensorCopy from DenseTensor to DistTensor.";
if (place == local_tensor.place()) {
- std::shared_ptr<phi::DenseTensor> tensor =
-     std::static_pointer_cast<phi::DenseTensor>(local_tensor.impl());
+ std::shared_ptr<DenseTensor> tensor =
+     std::static_pointer_cast<DenseTensor>(local_tensor.impl());
self->tensor.set_impl(std::make_shared<DistTensor>(
tensor, global_ddims, process_mesh, placements));
VLOG(4) << "Same place, do ShareDataWith for DistTensor.";
} else {
- std::shared_ptr<phi::DenseTensor> tensor =
-     std::static_pointer_cast<phi::DenseTensor>(
-         local_tensor.copy_to(place, true).impl());
+ std::shared_ptr<DenseTensor> tensor = std::static_pointer_cast<DenseTensor>(
+     local_tensor.copy_to(place, true).impl());
self->tensor.set_impl(std::make_shared<DistTensor>(
tensor, global_ddims, process_mesh, placements));
VLOG(4) << "Different place, do TensorCopy for DistTensor.";
@@ -357,10 +356,10 @@ void InitTensorWithFrameworkTensor(TensorObject* self,
const std::string& name) {
self->tensor.set_name(name);
if (place == src.place()) {
- self->tensor.set_impl(std::make_shared<phi::DenseTensor>(src));
+ self->tensor.set_impl(std::make_shared<DenseTensor>(src));
VLOG(4) << "Same place, do ShareDataWith";
} else {
- auto temp = paddle::Tensor(std::make_shared<phi::DenseTensor>(src));
+ auto temp = paddle::Tensor(std::make_shared<DenseTensor>(src));
self->tensor.set_impl(temp.copy_to(place, true).impl());
VLOG(4) << "Different place, do TensorCopy";
}
@@ -582,7 +581,7 @@ void AutoInitTensorByPyArray(TensorObject* py_tensor_ptr,
InitTensorWithNumpyValue(py_tensor_ptr, numpy_value, place, zero_copy);
}

- // initialize Tensor by Tensor or phi::DenseTensor (mix args and
+ // initialize Tensor by Tensor or DenseTensor (mix args and
// kwargs) automatically.
void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
std::unordered_map<std::string, PyObject*> kws_map,
@@ -660,7 +659,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
}
} else {
// init by framework tensor
- phi::DenseTensor src_tensor;
+ DenseTensor src_tensor;
if (kw_order_map["value"] <= args_num) {
src_tensor = CastPyArg2FrameworkTensor(
PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
16 changes: 8 additions & 8 deletions paddle/fluid/pybind/eager_functions.cc
@@ -343,7 +343,7 @@ static PyObject* eager_api_read_next_tensor_list(PyObject* self,
auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
autograd_meta->SetPersistable(false);
autograd_meta->SetStopGradient(true);
- tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
+ tensor.set_impl(std::make_shared<DenseTensor>(tensor_base));
return tensor;
};
for (auto& tensor_base : tensor_base_list) {
@@ -530,9 +530,9 @@ static Tensor InitializedEmptyTensor() {
egr::Controller::Instance().GenerateUniqueName("generated_tensor"));
auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
autograd_meta->SetPersistable(false);
- std::shared_ptr<phi::DenseTensor> dense_tensor = nullptr;
+ std::shared_ptr<DenseTensor> dense_tensor = nullptr;
std::shared_ptr<phi::Allocation> allocation_ptr = nullptr;
- dense_tensor = std::make_shared<phi::DenseTensor>(
+ dense_tensor = std::make_shared<DenseTensor>(
allocation_ptr, phi::DenseTensorMeta(phi::DataType::FLOAT32, ddims));
tensor.set_impl(dense_tensor);
autograd_meta->SetGradNode(
@@ -956,9 +956,9 @@ static PyObject* eager_api_sparse_coo_tensor(PyObject* self,
non_zero_elements.is_dense_tensor(),
common::errors::Fatal("the non-zero elements must be a DenseTensor."));
auto dense_indices =
- std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
+ std::dynamic_pointer_cast<DenseTensor>(non_zero_indices.impl());
auto dense_elements =
- std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
+ std::dynamic_pointer_cast<DenseTensor>(non_zero_elements.impl());
// TODO(zhangkaihuo): After creating SparseCooTensor, call coalesced() to
// sort and merge duplicate indices
std::shared_ptr<phi::SparseCooTensor> coo_tensor =
@@ -1005,11 +1005,11 @@ static PyObject* eager_api_sparse_csr_tensor(PyObject* self,
common::errors::Fatal("the non-zero elements must be a DenseTensor."));

auto dense_crows =
- std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
+ std::dynamic_pointer_cast<DenseTensor>(non_zero_crows.impl());
auto dense_cols =
- std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
+ std::dynamic_pointer_cast<DenseTensor>(non_zero_cols.impl());
auto dense_elements =
- std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
+ std::dynamic_pointer_cast<DenseTensor>(non_zero_elements.impl());
std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
std::make_shared<phi::SparseCsrTensor>(*dense_crows,
*dense_cols,
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/eager_generator.cc
@@ -830,7 +830,7 @@ static bool CollectGradInformationFromOpInfo(
ins[in_name].emplace_back(std::make_shared<paddle::imperative::VarBase>(
"auto_" + in_name + "_" + std::to_string(i)));
ins[in_name][i]->SetOverriddenStopGradient(false);
- ins[in_name][i]->MutableVar()->GetMutable<phi::DenseTensor>();
+ ins[in_name][i]->MutableVar()->GetMutable<DenseTensor>();
}
} else {
for (const proto::OpProto::Var& input : op_proto.inputs()) {
@@ -854,7 +854,7 @@
ins[in_name] = {
std::make_shared<paddle::imperative::VarBase>("auto_" + in_name)};
ins[in_name][0]->SetOverriddenStopGradient(false);
- ins[in_name][0]->MutableVar()->GetMutable<phi::DenseTensor>();
+ ins[in_name][0]->MutableVar()->GetMutable<DenseTensor>();
}
}
VLOG(6) << "Prepared Forward Ins Map, size = " << ins.size();
@@ -872,7 +872,7 @@
outs[out_name] = {
std::make_shared<paddle::imperative::VarBase>("auto_" + out_name)};
outs[out_name][0]->SetOverriddenStopGradient(false);
- outs[out_name][0]->MutableVar()->GetMutable<phi::DenseTensor>();
+ outs[out_name][0]->MutableVar()->GetMutable<DenseTensor>();
}
VLOG(6) << "Prepared Forward Outs Map, size = " << outs.size();

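Note on the pattern shared by every hunk above: dropping the phi:: qualifier only compiles if DenseTensor is already visible in the enclosing paddle::pybind scope, for example through a using-declaration in a commonly included header. A minimal sketch of that assumption follows; the alias placement and the Example function are illustrative, not the exact declarations this PR relies on.

// Minimal sketch, assuming a using-declaration such as the one below is
// visible in the pybind translation units; the header that actually provides
// it is not part of this diff.
namespace phi {
class DenseTensor { /* ... */ };
}  // namespace phi

namespace paddle {
namespace pybind {

using phi::DenseTensor;  // unqualified DenseTensor now names phi::DenseTensor

inline void Example() {
  DenseTensor t;       // resolves through the using-declaration
  phi::DenseTensor u;  // the fully qualified spelling still works
  (void)t;
  (void)u;
}

}  // namespace pybind
}  // namespace paddle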