4 changes: 2 additions & 2 deletions paddle/fluid/operators/array_operator.h
@@ -36,7 +36,7 @@ class ArrayOp : public framework::OperatorBase {
     auto *i = scope.FindVar(Input("I"));
     PADDLE_ENFORCE_NOT_NULL(i,
                             common::errors::NotFound("Input(I) is not found."));
-    auto &i_tensor = i->Get<phi::DenseTensor>();
+    auto &i_tensor = i->Get<DenseTensor>();
     PADDLE_ENFORCE_EQ(i_tensor.numel(),
                       1,
                       common::errors::InvalidArgument(
@@ -54,7 +54,7 @@ class ArrayOp : public framework::OperatorBase {
         i_tensor.place().GetType() == phi::AllocationType::XPU ||
         i_tensor.place().GetType() == phi::AllocationType::CUSTOM) {
       // FIXME: Avoid copy from GPU to CPU
-      phi::DenseTensor t;
+      DenseTensor t;
       phi::Copy(dev_ctx, i_tensor, phi::CPUPlace(), false, &t);
       dev_ctx.Wait();
       offset = static_cast<size_t>(*t.data<int64_t>());
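Review note: the shortened `DenseTensor` spelling in these hunks compiles only because the surrounding `paddle::operators` code already has the phi type in scope under that name. This diff does not show where that alias lives, so the snippet below is a hypothetical sketch of the kind of declaration a cleanup like this relies on (header path and placement are assumptions, not taken from this PR):

```cpp
// Hypothetical common header included by the operator sources in this PR.
// Assumption: one alias pulls phi::DenseTensor into paddle::operators so
// every operator file can drop the phi:: qualifier.
#include "paddle/phi/core/dense_tensor.h"

namespace paddle {
namespace operators {

using DenseTensor = phi::DenseTensor;  // the alias the rename depends on

}  // namespace operators
}  // namespace paddle
```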
4 changes: 2 additions & 2 deletions paddle/fluid/operators/assert_op.cc
@@ -59,7 +59,7 @@ class AssertOp : public framework::OperatorBase {
     PADDLE_ENFORCE_NOT_NULL(
         cond_var_ptr,
         common::errors::NotFound("Input(Condition) of AssertOp is not found."));
-    const phi::DenseTensor &cond = cond_var_ptr->Get<phi::DenseTensor>();
+    const DenseTensor &cond = cond_var_ptr->Get<DenseTensor>();
     PADDLE_ENFORCE_EQ(
         cond.numel(),
         1,
@@ -79,7 +79,7 @@ class AssertOp : public framework::OperatorBase {
     const std::vector<std::string> &x_names = Inputs(kData.data());
     for (const std::string &name : x_names) {
       const framework::Variable *x_var_ptr = scope.FindVar(name);
-      const phi::DenseTensor &x_tensor = x_var_ptr->Get<phi::DenseTensor>();
+      const DenseTensor &x_tensor = x_var_ptr->Get<DenseTensor>();
       formatter.Print(x_tensor, name);
     }
 
7 changes: 3 additions & 4 deletions paddle/fluid/operators/assign_op.h
@@ -36,8 +36,8 @@ class AssignFunctor {
   AssignFunctor(framework::Variable *out, const phi::DeviceContext &dev_ctx)
       : out_(out), dev_ctx_(dev_ctx) {}
 
-  void operator()(const phi::DenseTensor &lod_tensor) const {
-    auto &out_tensor = *out_->GetMutable<phi::DenseTensor>();
+  void operator()(const DenseTensor &lod_tensor) const {
+    auto &out_tensor = *out_->GetMutable<DenseTensor>();
     copy_tensor(lod_tensor, &out_tensor);
   }
 
@@ -68,8 +68,7 @@ class AssignFunctor {
   }
 
  private:
-  void copy_tensor(const phi::DenseTensor &lod_tensor,
-                   phi::DenseTensor *out) const {
+  void copy_tensor(const DenseTensor &lod_tensor, DenseTensor *out) const {
     if (!lod_tensor.IsInitialized()) return;
     auto &out_tensor = *out;
     paddle::framework::TensorCopy(lod_tensor, lod_tensor.place(), &out_tensor);
26 changes: 12 additions & 14 deletions paddle/fluid/operators/batch_norm_op.cc
@@ -188,32 +188,30 @@ phi::KernelKey BatchNormOp::GetExpectedKernelType(
     PADDLE_ENFORCE_EQ(
         bn_param_type,
         framework::TransToProtoVarType(
-            ctx.Input<phi::DenseTensor>("Scale")->dtype()),
+            ctx.Input<DenseTensor>("Scale")->dtype()),
         common::errors::InvalidArgument("Scale input should be of float type"));
   }
   if (ctx.HasInput("Bias")) {
     PADDLE_ENFORCE_EQ(
         bn_param_type,
-        framework::TransToProtoVarType(
-            ctx.Input<phi::DenseTensor>("Bias")->dtype()),
+        framework::TransToProtoVarType(ctx.Input<DenseTensor>("Bias")->dtype()),
         common::errors::InvalidArgument("Bias input should be of float type"));
   }
   PADDLE_ENFORCE_EQ(
       bn_param_type,
-      framework::TransToProtoVarType(
-          ctx.Input<phi::DenseTensor>("Mean")->dtype()),
+      framework::TransToProtoVarType(ctx.Input<DenseTensor>("Mean")->dtype()),
       common::errors::InvalidArgument("Mean input should be of float type"));
   PADDLE_ENFORCE_EQ(bn_param_type,
                     framework::TransToProtoVarType(
-                        ctx.Input<phi::DenseTensor>("Variance")->dtype()),
+                        ctx.Input<DenseTensor>("Variance")->dtype()),
                     common::errors::InvalidArgument(
                         "Variance input should be of float type"));
   return phi::KernelKey(input_data_type, ctx.GetPlace());
 }
 
 phi::KernelKey BatchNormOp::GetKernelTypeForVar(
     const std::string &var_name,
-    const phi::DenseTensor &tensor,
+    const DenseTensor &tensor,
     const phi::KernelKey &expected_kernel_type) const {
 #ifdef PADDLE_WITH_DNNL
   // Only input require reshaping, weights and
@@ -387,9 +385,9 @@ phi::KernelKey BatchNormGradOp::GetExpectedKernelType(
     PADDLE_THROW(
         common::errors::InvalidArgument("can't find gradient variable of Y"));
   }
-  const phi::DenseTensor *t = nullptr;
-  if (var->IsType<phi::DenseTensor>()) {
-    t = &var->Get<phi::DenseTensor>();
+  const DenseTensor *t = nullptr;
+  if (var->IsType<DenseTensor>()) {
+    t = &var->Get<DenseTensor>();
   }
   if (t == nullptr) {
     PADDLE_THROW(
@@ -402,7 +400,7 @@ phi::KernelKey BatchNormGradOp::GetExpectedKernelType(
 
 phi::KernelKey BatchNormGradOp::GetKernelTypeForVar(
     const std::string &var_name,
-    const phi::DenseTensor &tensor,
+    const DenseTensor &tensor,
     const phi::KernelKey &expected_kernel_type) const {
 #ifdef PADDLE_WITH_DNNL
   // Only input require reshaping, weights and
@@ -529,9 +527,9 @@ phi::KernelKey BatchNormDoubleGradOp::GetExpectedKernelType(
     PADDLE_THROW(
         common::errors::NotFound("cannot find gradient variable of Y"));
   }
-  const phi::DenseTensor *t = nullptr;
-  if (var->IsType<phi::DenseTensor>()) {
-    t = &var->Get<phi::DenseTensor>();
+  const DenseTensor *t = nullptr;
+  if (var->IsType<DenseTensor>()) {
+    t = &var->Get<DenseTensor>();
   }
   if (t == nullptr) {
     PADDLE_THROW(
4 changes: 2 additions & 2 deletions paddle/fluid/operators/beam_search_decode_op.h
@@ -43,7 +43,7 @@ struct BeamSearchDecodeFunctor {
     auto* dev_ctx = pool.Get(step_ids_origin_[0].place());
     // Copy all tensors in the input tensor array
     for (auto& step_id : step_ids_origin_) {
-      phi::DenseTensor out;
+      DenseTensor out;
       if (step_id.numel() > 0) {
         if (tensor_on_gpu_) {
           dev_ctx->Wait();
@@ -65,7 +65,7 @@ struct BeamSearchDecodeFunctor {
     auto* dev_ctx = pool.Get(step_scores_origin_[0].place());
     // Copy all tensors in the input tensor array
     for (auto& step_score : step_scores_origin_) {
-      phi::DenseTensor out;
+      DenseTensor out;
      if (step_score.numel() > 0) {
         if (tensor_on_gpu_) {
           dev_ctx->Wait();
4 changes: 2 additions & 2 deletions paddle/fluid/operators/beam_search_decode_op_def.h
@@ -53,8 +53,8 @@ struct BeamSearchDecoder {
    * with word score.
    * Param:
    *  sentence_vector_list: sentence_vector for each source sentence.
-   *  id_tensor: result phi::DenseTensor for sentences of id.
-   *  score_tensor: result phi::DenseTensor for sentences of score.
+   *  id_tensor: result DenseTensor for sentences of id.
+   *  score_tensor: result DenseTensor for sentences of score.
    *  reverse: whether ids of sentence in sentence_vector_list is reversed
    *  sort_by_score: whether to sort hypotheses of each sentence by scores.
    */
2 changes: 1 addition & 1 deletion paddle/fluid/operators/cast_op.cc
@@ -91,7 +91,7 @@ class CastOp : public framework::OperatorWithKernel {
   phi::KernelKey GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override {
     // CastOp kernel's device type is decided by input tensor place
-    auto *tensor = ctx.Input<phi::DenseTensor>("X");
+    auto *tensor = ctx.Input<DenseTensor>("X");
     PADDLE_ENFORCE_EQ(tensor->IsInitialized(),
                       true,
                       common::errors::PreconditionNotMet(
18 changes: 9 additions & 9 deletions paddle/fluid/operators/collective/recv_v2_op.cu.cc
@@ -48,7 +48,7 @@ phi::DDim recv_shape_info(const phi::Place &place,
   phi::DataType shape_dtype = phi::DataType::INT32;
 
   // step1: recv the shape size
-  phi::DenseTensor gpu_shape_size_tensor(shape_dtype);
+  DenseTensor gpu_shape_size_tensor(shape_dtype);
   if (!group) {
     gpu_shape_size_tensor.Resize({1});
     gpu_shape_size_tensor.mutable_data(place, shape_dtype);
@@ -57,11 +57,11 @@ phi::DDim recv_shape_info(const phi::Place &place,
   }
 
   // copy the shape size tensor to cpu
-  phi::DenseTensor *cpu_shape_size_tensor = new phi::DenseTensor(shape_dtype);
+  DenseTensor *cpu_shape_size_tensor = new phi::DenseTensor(shape_dtype);
   cpu_shape_size_tensor->Resize({1});
   cpu_shape_size_tensor->mutable_data(phi::CPUPlace(), shape_dtype);
   if (group) {
-    std::vector<phi::DenseTensor> shape_size_tensor;
+    std::vector<DenseTensor> shape_size_tensor;
     shape_size_tensor.emplace_back(*cpu_shape_size_tensor);
     auto shape_size_task = group->Recv(shape_size_tensor, peer);
   } else {
@@ -73,19 +73,19 @@ phi::DDim recv_shape_info(const phi::Place &place,
   VLOG(3) << "recv the shape size: " << shape_size << " from peer";
 
   // step2: recv the shape
-  phi::DenseTensor gpu_shape_tensor(shape_dtype);
+  DenseTensor gpu_shape_tensor(shape_dtype);
   if (!group) {
     gpu_shape_tensor.Resize({shape_size});
     gpu_shape_tensor.mutable_data(place, shape_dtype);
     comm_ctx->Recv(&gpu_shape_tensor, shape_size, peer, stream);
   }
 
   // copy the shape tensor to cpu
-  phi::DenseTensor *cpu_shape_tensor = new phi::DenseTensor(shape_dtype);
+  DenseTensor *cpu_shape_tensor = new phi::DenseTensor(shape_dtype);
   cpu_shape_tensor->Resize({shape_size});
   cpu_shape_tensor->mutable_data(phi::CPUPlace(), shape_dtype);
   if (group) {
-    std::vector<phi::DenseTensor> shape_tensor;
+    std::vector<DenseTensor> shape_tensor;
     shape_tensor.emplace_back(*cpu_shape_tensor);
     auto shape_task = group->Recv(shape_tensor, peer);
   } else {
@@ -132,9 +132,9 @@ class RecvOpV2CUDAKernel : public framework::OpKernel<T> {
     if (map->has(rid)) {
       // Use ProcessGroup
       distributed::ProcessGroup *pg = map->get(rid);
-      std::vector<phi::DenseTensor> out_tensor;
+      std::vector<DenseTensor> out_tensor;
       auto out_shape = ctx.Attr<std::vector<int>>("out_shape");
-      auto out = ctx.Output<phi::DenseTensor>("Out");
+      auto out = ctx.Output<DenseTensor>("Out");
       // auto out_dims = out->dims();
 
       if (dynamic_shape) {
@@ -185,7 +185,7 @@ class RecvOpV2CUDAKernel : public framework::OpKernel<T> {
     }
 
     auto out_shape = ctx.Attr<std::vector<int>>("out_shape");
-    auto out = ctx.Output<phi::DenseTensor>("Out");
+    auto out = ctx.Output<DenseTensor>("Out");
     // auto out_dims = out->dims();
     auto numel = out->numel();
 
14 changes: 7 additions & 7 deletions paddle/fluid/operators/collective/send_v2_op.cu.cc
@@ -49,14 +49,14 @@ void send_shape_info(const phi::DenseTensor& x,
   int shape_size = dims.size();
 
   // step1: send the shape size
-  phi::DenseTensor cpu_shape_size_tensor(shape_dtype);
+  DenseTensor cpu_shape_size_tensor(shape_dtype);
   cpu_shape_size_tensor.Resize({1});
   cpu_shape_size_tensor.mutable_data(phi::CPUPlace(), shape_dtype);
   auto* cpu_data = cpu_shape_size_tensor.data<int>();
   cpu_data[0] = shape_size;
 
   if (group) {
-    std::vector<phi::DenseTensor> shape_size_tensor;
+    std::vector<DenseTensor> shape_size_tensor;
     shape_size_tensor.template emplace_back(cpu_shape_size_tensor);
     auto shape_size_task = group->Send(shape_size_tensor, peer);
   } else {
@@ -71,7 +71,7 @@ void send_shape_info(const phi::DenseTensor& x,
   VLOG(3) << "send the shape size: " << shape_size << " to peer";
 
   // step2: send the shape
-  phi::DenseTensor cpu_shape_tensor(shape_dtype);
+  DenseTensor cpu_shape_tensor(shape_dtype);
   cpu_shape_tensor.Resize({shape_size});
   cpu_shape_tensor.mutable_data(phi::CPUPlace(), shape_dtype);
   auto* cpu_shape_data = cpu_shape_tensor.data<int>();
@@ -80,7 +80,7 @@ void send_shape_info(const phi::DenseTensor& x,
   }
 
   if (group) {
-    std::vector<phi::DenseTensor> shape_tensor;
+    std::vector<DenseTensor> shape_tensor;
     shape_tensor.template emplace_back(cpu_shape_tensor);
     auto shape_task = group->Send(shape_tensor, peer);
   } else {
@@ -119,7 +119,7 @@ class SendOpV2CUDAKernel : public framework::OpKernel<T> {
     if (map->has(rid)) {
       // Use ProcessGroup
       distributed::ProcessGroup* pg = map->get(rid);
-      auto x = ctx.Input<phi::DenseTensor>("X");
+      auto x = ctx.Input<DenseTensor>("X");
 
       if (dynamic_shape) {
         // dynamic shape for switch send/recv
@@ -133,7 +133,7 @@ class SendOpV2CUDAKernel : public framework::OpKernel<T> {
             pg);
       }
 
-      std::vector<phi::DenseTensor> in_tensor;
+      std::vector<DenseTensor> in_tensor;
       in_tensor.push_back(*x);
       auto task = pg->Send(in_tensor, peer);
       return;
@@ -168,7 +168,7 @@ class SendOpV2CUDAKernel : public framework::OpKernel<T> {
       stream = ctx.cuda_device_context().stream();
     }
 
-    auto x = ctx.Input<phi::DenseTensor>("X");
+    auto x = ctx.Input<DenseTensor>("X");
     int64_t numel = x->numel();
 
     if (dynamic_shape) {
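Review note: send_v2_op.cu.cc and recv_v2_op.cu.cc together implement a two-step dynamic-shape handshake: the sender first transmits the number of dimensions, then the dimensions themselves, and only then the payload, so the receiver can Resize its output before the data arrives. Below is a minimal, dependency-free sketch of that ordering; modeling the transport as a FIFO of int32 words is an assumption for illustration (the real code uses NCCL sends/recvs plus CPU-GPU copies):

```cpp
#include <cassert>
#include <cstdint>
#include <deque>
#include <vector>

// Toy stand-in for the peer-to-peer channel.
using Channel = std::deque<int32_t>;

// Sender side: step1 pushes the shape size, step2 pushes each dim,
// mirroring the structure of send_shape_info above.
void send_shape(Channel& ch, const std::vector<int32_t>& dims) {
  ch.push_back(static_cast<int32_t>(dims.size()));  // step1: shape size
  for (int32_t d : dims) ch.push_back(d);           // step2: the shape
}

// Receiver side: mirrors the two steps so it knows how many dims to read,
// like recv_shape_info building a phi::DDim before receiving the payload.
std::vector<int32_t> recv_shape(Channel& ch) {
  int32_t shape_size = ch.front();  // step1: shape size
  ch.pop_front();
  std::vector<int32_t> dims(shape_size);
  for (int32_t& d : dims) {         // step2: the shape
    d = ch.front();
    ch.pop_front();
  }
  return dims;
}

int main() {
  Channel ch;
  send_shape(ch, {8, 128, 30522});
  assert((recv_shape(ch) == std::vector<int32_t>{8, 128, 30522}));
}
```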
7 changes: 3 additions & 4 deletions paddle/fluid/operators/controlflow/conditional_block_infer_op.cc
@@ -60,10 +60,9 @@ class ConditionalBlockInferOp : public ConditionalOp {
     // vector or tensor, whether need to execute the operators in sub-block
     // depends on the input variables (Input).
     auto xs = InputTensors(scope, "Input");
-    need_run =
-        std::all_of(xs.begin(), xs.end(), [](const phi::DenseTensor *t) {
-          return t->numel() != 0;
-        });
+    need_run = std::all_of(xs.begin(), xs.end(), [](const DenseTensor *t) {
+      return t->numel() != 0;
+    });
   }
 
   if (need_run) {
16 changes: 7 additions & 9 deletions paddle/fluid/operators/controlflow/conditional_block_op.cc
@@ -60,10 +60,9 @@ class ConditionalBlockOp : public ConditionalOp {
     // vector or tensor, whether need to execute the operators in sub-block
     // depends on the input variables (Input).
     auto xs = InputTensors(scope, ConditionalOp::kInputs);
-    need_run =
-        std::all_of(xs.begin(), xs.end(), [](const phi::DenseTensor *t) {
-          return t->numel() != 0;
-        });
+    need_run = std::all_of(xs.begin(), xs.end(), [](const DenseTensor *t) {
+      return t->numel() != 0;
+    });
   }
 
   if (need_run) {
@@ -158,10 +157,9 @@ class ConditionalBlockGradOp : public ConditionalOp {
       need_run = ScalarCondition(xs);
     } else {
       auto xs = this->InputTensors(scope, ConditionalOp::kInputs);
-      need_run =
-          std::all_of(xs.begin(), xs.end(), [](const phi::DenseTensor *t) {
-            return t->numel() != 0;
-          });
+      need_run = std::all_of(xs.begin(), xs.end(), [](const DenseTensor *t) {
+        return t->numel() != 0;
+      });
     }
 
     const auto &inputs = Inputs(ConditionalOp::kInputs);
@@ -281,7 +279,7 @@ class ConditionalBlockGradInferShape : public framework::InferShapeBase {
 class ConditionalBlockGradInferVarType : public framework::VarTypeInference {
  public:
   void operator()(framework::InferVarTypeContext *ctx) const override {
-    // NOTE(Aurelius84): VarType of Output is phi::DenseTensor by default. In
+    // NOTE(Aurelius84): VarType of Output is DenseTensor by default. In
     // case of Input is {Tensor, DenseTensorArray}, we need synchronous the
     // Input's VarType into Input@GRAD to avoid generating {Tensor, Tensor} as
     // Input@GRAD.
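Review note: all three conditional-block hunks reshape the same need_run predicate, which is worth stating precisely: the sub-block runs only if every input tensor is non-empty. A standalone illustration under the assumption of plain C++17 with no Paddle types (note that std::all_of is vacuously true on an empty range, so zero inputs also count as runnable):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Same shape as the lambda in the diffs above, but over plain numel values.
bool all_inputs_non_empty(const std::vector<int64_t>& numels) {
  return std::all_of(numels.begin(), numels.end(),
                     [](int64_t numel) { return numel != 0; });
}

int main() {
  const std::vector<int64_t> ok = {4, 2, 8};
  const std::vector<int64_t> has_empty = {4, 0, 8};
  const std::vector<int64_t> none = {};
  assert(all_inputs_non_empty(ok));          // every tensor has elements
  assert(!all_inputs_non_empty(has_empty));  // one empty tensor vetoes the run
  assert(all_inputs_non_empty(none));        // vacuously true with no inputs
}
```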