add vjp interface for reshard op. #63347

Merged (3 commits) on Apr 11, 2024
17 changes: 7 additions & 10 deletions paddle/fluid/pir/dialect/distributed/ir/dist_api.cc
@@ -45,23 +45,20 @@ pir::Value shard_tensor(const pir::Value& x,
return shard_tensor_op.out();
}

pir::Value reshard(const pir::Value& x,
const phi::distributed::ProcessMesh& process_mesh,
const std::vector<int64_t>& dims_mapping) {
pir::Value reshard(
const pir::Value& x,
const phi::distributed::ProcessMesh& process_mesh,
const std::vector<int64_t>& dims_mapping,
const flat_hash_map<int64_t, phi::ReduceType>& partial_status) {
pir::IrContext* ctx = pir::IrContext::Instance();
// TODO(ywt01) get partial_status by func parameter
paddle::flat_hash_map<int64_t, phi::ReduceType> partial_status;
TensorDistAttribute tensor_dist_attr =
TensorDistAttribute::get(ctx, process_mesh, dims_mapping, partial_status);

auto reshard_op = ApiBuilder::Instance().GetBuilder()->Build<ReShardOp>(
x, tensor_dist_attr);
return reshard_op.result(0);
return reshard(x, tensor_dist_attr);
}

pir::Value reshard(const pir::Value& x,
const TensorDistAttribute& tensor_dist_attr) {
auto reshard_op = ApiBuilder::Instance().GetBuilder()->Build<ReShardOp>(
auto reshard_op = ApiBuilder::Instance().GetBuilder()->Build<ReshardOp>(
x, tensor_dist_attr);
return reshard_op.result(0);
}
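
Note: the change above removes the hard-coded empty partial_status (and its TODO) by accepting it as a defaulted parameter, and the mesh/dims_mapping overload now builds the TensorDistAttribute and delegates to the attribute-based overload, so the ReshardOp is constructed in one place. A minimal standalone sketch of that forwarding pattern, using simplified stand-in types rather than Paddle's pir/phi classes:

// Standalone sketch (hypothetical simplified types, not Paddle code) of the
// overload-forwarding pattern: the mesh/dims_mapping overload packs the
// distribution info and delegates to the attribute-based overload.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

enum class ReduceType { kSum, kMax };

struct TensorDistAttr {
  std::string mesh;
  std::vector<int64_t> dims_mapping;
  std::map<int64_t, ReduceType> partial_status;
};

struct Value { std::string name; };

// Attribute-based overload: the single place where the (hypothetical) op is built.
Value reshard(const Value& x, const TensorDistAttr& attr) {
  std::cout << "build reshard op for " << x.name << " on mesh " << attr.mesh << "\n";
  return Value{x.name + "_resharded"};
}

// Convenience overload: assembles the dist attribute and forwards.
Value reshard(const Value& x,
              const std::string& mesh,
              const std::vector<int64_t>& dims_mapping,
              const std::map<int64_t, ReduceType>& partial_status = {}) {
  TensorDistAttr attr{mesh, dims_mapping, partial_status};
  return reshard(x, attr);
}

int main() {
  Value v = reshard(Value{"x"}, "mesh0", {0, -1}, {{1, ReduceType::kSum}});
  std::cout << v.name << "\n";
}
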
8 changes: 5 additions & 3 deletions paddle/fluid/pir/dialect/distributed/ir/dist_api.h
@@ -29,9 +29,11 @@ pir::Value shard_tensor(const pir::Value& x,
const phi::distributed::ProcessMesh& process_mesh,
const std::vector<int64_t>& dims_mapping);

pir::Value reshard(const pir::Value& x,
const phi::distributed::ProcessMesh& process_mesh,
const std::vector<int64_t>& dims_mapping);
pir::Value reshard(
const pir::Value& x,
const phi::distributed::ProcessMesh& process_mesh,
const std::vector<int64_t>& dims_mapping,
const flat_hash_map<int64_t, phi::ReduceType>& partial_status = {});

pir::Value reshard(const pir::Value& x,
const TensorDistAttribute& tensor_dist_attr);
2 changes: 1 addition & 1 deletion paddle/fluid/pir/dialect/distributed/ir/dist_dialect.cc
@@ -35,7 +35,7 @@ void DistDialect::initialize() {
TensorDistAttribute,
OperationDistAttribute>();
RegisterTypes<DistDenseTensorType>();
RegisterOps<ShardTensorOp, ReShardOp>();
RegisterOps<ShardTensorOp, ReshardOp>();
}

void DistDialect::PrintType(pir::Type type, std::ostream &os) const {
60 changes: 54 additions & 6 deletions paddle/fluid/pir/dialect/distributed/ir/dist_op.cc
@@ -15,6 +15,7 @@
#include "paddle/fluid/pir/dialect/distributed/ir/dist_op.h"
#include "paddle/fluid/pir/dialect/distributed/ir/dist_attribute.h"
#include "paddle/fluid/pir/dialect/distributed/ir/dist_type.h"
#include "paddle/fluid/pir/dialect/operator/ir/api_builder.h"
#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h"
#include "paddle/fluid/pir/dialect/operator/ir/op_type.h"
#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h"
@@ -28,7 +29,7 @@ namespace paddle {
namespace dialect {

const char* ShardTensorOp::attributes_name[1] = {"op_dist_attr"};
const char* ReShardOp::attributes_name[1] = {"op_dist_attr"};
const char* ReshardOp::attributes_name[1] = {"op_dist_attr"};

void ShardTensorOp::VerifySig() {
VLOG(4)
@@ -159,8 +160,54 @@ void ShardTensorOp::Build(pir::Builder& builder,
::pir::PassStopGradientsDefaultly(argument);
}

void ReShardOp::VerifySig() {
VLOG(4) << "Start Verifying inputs, outputs and attributes for: ReShardOp.";
OpInfoTuple ReshardOp::GetOpInfo() {
return OpInfoTuple(
{OpInputInfo()}, {}, {OpOutputInfo()}, OpRunTimeInfo(), "reshard");
}

std::vector<std::vector<pir::Value>> ReshardOp::Vjp(
pir::Operation* op,
const std::vector<std::vector<pir::Value>>& inputs_,
const std::vector<std::vector<pir::Value>>& outputs,
const std::vector<std::vector<pir::Value>>& out_grads,
const std::vector<std::vector<bool>>& stop_gradients) {
VLOG(6) << "Start call vjp for reshard op.";
PADDLE_ENFORCE_EQ(
inputs_.size(),
1,
common::errors::InvalidArgument("reshard op's inputs' size should be 1"));
PADDLE_ENFORCE_EQ(inputs_[0].size(),
1,
common::errors::InvalidArgument(
"reshard op's inputs[0]'s size should be 1"));
auto dist_type = inputs_[0][0].type().dyn_cast<DistTypeInterface>();

PADDLE_ENFORCE_NOT_NULL(
dist_type,
common::errors::InvalidArgument(
"Currently, reshard op's inputs type must be dist type."));

PADDLE_ENFORCE_EQ(out_grads.size(),
1,
common::errors::InvalidArgument(
"reshard op's outputs grad size should be 1"));

PADDLE_ENFORCE_EQ(out_grads[0].size(),
1,
common::errors::InvalidArgument(
"reshard op's outputs grad[0] size should be 1"));

auto& builder = *ApiBuilder::Instance().GetBuilder();

auto grad_op =
builder.Build<ReshardOp>(out_grads[0][0], dist_type.tensor_dist_attr());

VLOG(6) << "End call vjp for reshard op.";

return {std::vector<pir::Value>{grad_op->result(0)}};
}
void ReshardOp::VerifySig() {
VLOG(4) << "Start Verifying inputs, outputs and attributes for: ReshardOp.";
VLOG(4) << "Verifying inputs:";
{
auto input_size = num_operands();
@@ -224,11 +271,11 @@ void ReShardOp::VerifySig() {
VLOG(4) << "End Verifying for: ShardTensorOp.";
}

void ReShardOp::Build(pir::Builder& builder,
void ReshardOp::Build(pir::Builder& builder,
pir::OperationArgument& argument,
pir::Value input,
TensorDistAttribute tensor_dist_attr) {
VLOG(4) << "Start build ReShardOp";
VLOG(4) << "Start build ReshardOp";

paddle::dialect::DistDenseTensorType input_tensor_type;
if (input.type().isa<paddle::dialect::DistDenseTensorType>()) {
@@ -270,10 +317,11 @@ void ReShardOp::Build(pir::Builder& builder,
tensor_dist_attr,
local_shape);
argument.AddOutput(out_dist_tensor_type);
::pir::PassStopGradientsDefaultly(argument);
}

} // namespace dialect
} // namespace paddle

IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ShardTensorOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ReShardOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ReshardOp)
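
Note: the Vjp added above treats reshard as value-preserving (it only changes a tensor's distribution, not its values), so the gradient rule is simply to reshard the output gradient back to the input's TensorDistAttribute. A standalone sketch of that rule with hypothetical simplified types (not Paddle's pir/dist types):

// Standalone sketch (hypothetical types, not Paddle code) of the VJP rule:
// the gradient of reshard(x, attr) is the output gradient resharded back to
// the input's distribution attribute.
#include <iostream>
#include <string>

struct DistAttr { std::string desc; };
struct Tensor  { std::string name; DistAttr dist_attr; };

// Forward: produce a tensor with the requested distribution.
Tensor reshard(const Tensor& x, const DistAttr& attr) {
  return Tensor{x.name + "@" + attr.desc, attr};
}

// VJP: out_grad flows back with the input's distribution restored.
Tensor reshard_vjp(const Tensor& input, const Tensor& out_grad) {
  return reshard(out_grad, input.dist_attr);
}

int main() {
  Tensor x{"x", {"shard(0)"}};
  Tensor y = reshard(x, DistAttr{"replicate"});
  Tensor dy{"dy", y.dist_attr};
  Tensor dx = reshard_vjp(x, dy);
  std::cout << dx.name << "\n";  // dy resharded back to shard(0)
}
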
15 changes: 13 additions & 2 deletions paddle/fluid/pir/dialect/distributed/ir/dist_op.h
@@ -15,6 +15,8 @@
#pragma once
#include <vector>

#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h"
#include "paddle/fluid/pir/dialect/operator/interface/vjp.h"
#include "paddle/pir/include/core/builder.h"
#include "paddle/pir/include/core/builtin_type.h"
#include "paddle/pir/include/core/op_base.h"
@@ -39,7 +41,7 @@ class ShardTensorOp : public pir::Op<ShardTensorOp> {
void VerifySig();
};

class ReShardOp : public pir::Op<ReShardOp> {
class ReshardOp : public pir::Op<ReshardOp, VjpInterface, OpYamlInfoInterface> {
public:
using Op::Op;
static const char* name() { return "dist_op.reshard"; }
@@ -49,10 +51,19 @@ class ReShardOp : public pir::Op<ReShardOp> {
pir::OperationArgument& argument, // NOLINT
pir::Value input,
TensorDistAttribute tensor_dist_attr);

static OpInfoTuple GetOpInfo();
static std::vector<std::vector<pir::Value>> Vjp(
pir::Operation* op,
const std::vector<std::vector<pir::Value>>& inputs_,
const std::vector<std::vector<pir::Value>>& outputs,
const std::vector<std::vector<pir::Value>>& out_grads,
const std::vector<std::vector<bool>>& stop_gradients);

void VerifySig();
};
} // namespace dialect
} // namespace paddle

IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ShardTensorOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ReShardOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ReshardOp)
5 changes: 3 additions & 2 deletions paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h
@@ -98,8 +98,9 @@ struct OpRunTimeInfo {
std::vector<std::string> skip_transform_inputs;
pir::AttributeMap extra_args_default_value;
std::vector<std::string> data_format_tensors;
bool is_onednn_only;
bool dynamic_fallback;
bool is_onednn_only = false;
bool dynamic_fallback = false;
OpRunTimeInfo() = default;

OpRunTimeInfo(const std::string& infer_meta_func,
const std::vector<std::string>& infer_meta_param,
37 changes: 34 additions & 3 deletions paddle/fluid/pybind/dist_static_op_function.h
@@ -18,6 +18,7 @@
#include "paddle/fluid/pir/dialect/operator/ir/api_builder.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/pir.h"
#include "paddle/phi/core/enforce.h"

namespace paddle {
@@ -66,12 +67,42 @@ static PyObject *static_api_reshard(PyObject *self,
PyObject *process_mesh_obj = PyTuple_GET_ITEM(args, 1);
auto process_mesh = CastPyArg2ProcessMesh(process_mesh_obj, 1);

PyObject *dims_mapping_obj = PyTuple_GET_ITEM(args, 2);
auto dims_mapping = CastPyArg2VectorOfInt64(dims_mapping_obj, 2);
PyObject *placements_obj = PyTuple_GET_ITEM(args, 2);
auto placements = CastPyArg2VectorOfPlacement(placements_obj, 2);

int64_t ndim = GetValueDims(input).size();
std::vector<int64_t> dim_map(ndim, -1);
for (size_t i = 0; i < placements.size(); i++) {
auto &placement = placements[i];
if (placement->is_shard()) {
auto shard_dim =
dynamic_cast<const phi::distributed::Shard &>(*placement).get_dim();
PADDLE_ENFORCE_EQ(
dim_map[shard_dim],
-1,
common::errors::InvalidArgument(
"Tensor dim %lld is already sharded on mesh dim %lld,"
" DistTensor operator implementation does not support things "
"like hybrid"
" sharding strategies yet (i.e. [Shard(0), Shard(0)])",
shard_dim,
dim_map[shard_dim]));
dim_map[shard_dim] = i;
}
}
paddle::flat_hash_map<int64_t, phi::ReduceType> partial_status;
for (size_t i = 0; i < placements.size(); ++i) {
auto &p = placements[i];
if (p->is_partial()) {
partial_status.insert(
{i,
dynamic_cast<phi::distributed::Partial &>(*p).get_reduce_type()});
}
}

// Call ir static api
auto static_api_out =
paddle::dialect::reshard(input, process_mesh, dims_mapping);
paddle::dialect::reshard(input, process_mesh, dim_map, partial_status);

return ToPyObject(static_api_out);
} catch (...) {
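
Note: the binding above now accepts placements instead of a raw dims_mapping and converts them before calling the C++ API: each Shard(d) placement records its mesh axis in a per-tensor-dimension dim_map (-1 meaning not sharded), and each Partial placement records its mesh axis and reduce type in partial_status. A self-contained sketch of that conversion with simplified stand-in types (not the real phi::distributed placement classes):

// Standalone sketch (simplified stand-in types) of the placement conversion:
// Shard(d) placements populate a per-tensor-dim mapping to mesh dims, and
// Partial placements are collected into a mesh-dim -> reduce-type map.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <vector>

enum class ReduceType { kSum };

struct Placement {
  std::optional<int64_t> shard_dim;       // set for Shard(dim)
  std::optional<ReduceType> reduce_type;  // set for Partial(reduce_type)
};

int main() {
  const int64_t ndim = 2;                                  // tensor rank
  std::vector<Placement> placements = {                    // one entry per mesh dim
      {0, std::nullopt},                                   // mesh dim 0: Shard(0)
      {std::nullopt, ReduceType::kSum}};                   // mesh dim 1: Partial(sum)

  std::vector<int64_t> dim_map(ndim, -1);                  // tensor dim -> mesh dim
  std::map<int64_t, ReduceType> partial_status;            // mesh dim -> reduce type

  for (size_t i = 0; i < placements.size(); ++i) {
    if (placements[i].shard_dim) {
      dim_map[*placements[i].shard_dim] = static_cast<int64_t>(i);
    } else if (placements[i].reduce_type) {
      partial_status.emplace(static_cast<int64_t>(i), *placements[i].reduce_type);
    }
  }

  for (int64_t m : dim_map) std::cout << m << " ";         // prints: 0 -1
  std::cout << "| partial mesh dims: " << partial_status.size() << "\n";
}
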
7 changes: 6 additions & 1 deletion python/paddle/distributed/auto_parallel/api.py
@@ -28,12 +28,15 @@
EagerParamBase,
Variable,
default_main_program,
in_pir_mode,
)
from paddle.distributed.auto_parallel import Engine, strategy as auto_strategy
from paddle.distributed.auto_parallel.interface import (
shard_tensor as shard_tensor_static,
)
from paddle.distributed.auto_parallel.placement_type import to_placements
from paddle.distributed.auto_parallel.placement_type import (
to_placements,
)
from paddle.distributed.auto_parallel.static.completion import (
mark_as_sharding_propagation_skip_op,
)
@@ -388,6 +391,8 @@ def reshard(dist_tensor, mesh, placements):
dist_attr._set_partial_dims(partial_dims)

return paddle.base.core.reshard(dist_tensor, dist_attr)
elif in_pir_mode():
return paddle._C_ops.reshard(dist_tensor, mesh, placements)
else:
assert isinstance(
dist_tensor, Variable
1 change: 1 addition & 0 deletions python/paddle/distributed/auto_parallel/static/engine.py
@@ -712,6 +712,7 @@ def _prepare_program(self, mode, init_parameters=True):
# TODO(zhiqiu): fit the processes below for pir
if self._in_pir_mode:
self._parallel_pir(mode)
self._has_prepared[mode] = True
return
# Do the planning process
self._plan(mode)
1 change: 1 addition & 0 deletions test/auto_parallel/pir/CMakeLists.txt
@@ -12,6 +12,7 @@ if(WITH_DISTRIBUTE AND WITH_GPU)
py_test_modules(test_pir_mse_spmd MODULES test_mse_spmd_rule ENVS
FLAGS_enable_pir_api=1)
py_test_modules(test_mlp MODULES test_mlp ENVS FLAGS_enable_pir_api=1)
py_test_modules(test_reshard MODULES test_reshard ENVS FLAGS_enable_pir_api=1)
set_tests_properties(test_mlp PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE" TIMEOUT
60)
endif()