Fix currnet current, etc #62330

Merged: 1 commit, Mar 4, 2024
2 changes: 1 addition & 1 deletion paddle/phi/core/distributed/auto_parallel/dist_tensor.h
@@ -79,7 +79,7 @@ class DistTensor final
       const Placements& placements);
 
   /// \brief Construct a empty dist tensor (for infer spmd)
-  /// \param dims The global dimension of the currnet Tensor.
+  /// \param dims The global dimension of the current Tensor.
   /// \param dist_attr The distributed attributes of the current tensor.
   DistTensor(const DDim& dims, const TensorDistAttr& dist_attr);
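As a usage note for the constructor documented in this hunk: a minimal sketch of building a shape-and-attributes-only DistTensor for the infer-spmd path. The shape values are invented, and it assumes a default-constructed TensorDistAttr is acceptable here; this is an illustration, not code from the PR.

#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"

// Sketch: a DistTensor carrying only global shape and distribution
// metadata, with no local data allocated (assumed semantics of the
// infer-spmd constructor above).
void BuildMetaOnlyDistTensor() {
  phi::distributed::TensorDistAttr dist_attr;   // default attributes
  phi::DDim dims = phi::make_ddim({64, 128});   // illustrative global shape
  phi::distributed::DistTensor meta_only(dims, dist_attr);
}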
@@ -107,7 +107,7 @@ struct InferSpmdFnImpl<Return (*)(Args...), infer_spmd_fn> {
   }
 };
 
-// for vecotr slot
+// for vector slot
 template <typename... Tail>
 struct InferSpmdFnCallHelper<const std::vector<const DistMetaTensor*>&,
                              Tail...> {
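The corrected comment labels the specialization that handles a vector-of-tensors argument. For readers new to this idiom, below is a self-contained, simplified sketch of the same pattern, one partial specialization per argument "slot" type; it is illustrative only, not Paddle's actual helper.

#include <iostream>
#include <vector>

// Simplified call helper: the primary template is only declared, and
// each supported argument ("slot") type gets its own specialization.
template <typename... Args>
struct CallHelper;

// Scalar slot.
template <typename... Tail>
struct CallHelper<int, Tail...> {
  static void Call(int v) { std::cout << "scalar slot: " << v << "\n"; }
};

// Vector slot, analogous to the
// `const std::vector<const DistMetaTensor*>&` specialization above.
template <typename... Tail>
struct CallHelper<const std::vector<int>&, Tail...> {
  static void Call(const std::vector<int>& v) {
    std::cout << "vector slot, size " << v.size() << "\n";
  }
};

int main() {
  CallHelper<int>::Call(7);
  const std::vector<int> xs{1, 2, 3};
  CallHelper<const std::vector<int>&>::Call(xs);
}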
8 changes: 4 additions & 4 deletions paddle/phi/core/distributed/auto_parallel/proto_helper.cc
@@ -35,17 +35,17 @@ auto_parallel::ProcessMeshProto to_proto(const ProcessMesh& process_mesh) {
 }
 
 auto_parallel::DeviceCapabilityProto to_proto(
-    const auto_parallel::DeviceCapability& device_capibilty) {
-  TO_PROTO_HELPER(device_capibilty, auto_parallel::DeviceCapabilityProto);
+    const auto_parallel::DeviceCapability& device_capability) {
+  TO_PROTO_HELPER(device_capability, auto_parallel::DeviceCapabilityProto);
 }
 
 auto_parallel::DeviceProto to_proto(const auto_parallel::Device& device) {
   TO_PROTO_HELPER(device, auto_parallel::DeviceProto);
 }
 
 auto_parallel::LinkCapabilityProto to_proto(
-    const auto_parallel::LinkCapability& link_capibilty) {
-  TO_PROTO_HELPER(link_capibilty, auto_parallel::LinkCapabilityProto);
+    const auto_parallel::LinkCapability& link_capability) {
+  TO_PROTO_HELPER(link_capability, auto_parallel::LinkCapabilityProto);
 }
 
 auto_parallel::LinkProto to_proto(const auto_parallel::Link& link) {
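The renamed parameters are consumed by `TO_PROTO_HELPER`, whose definition is outside this diff. One plausible shape for such a macro, assuming each `auto_parallel` type can serialize itself into its protobuf counterpart (an assumption to verify against the real definition in proto_helper.cc):

// Assumed expansion, illustrative only: declare the target proto message,
// let the object fill it in, and return it by value.
#define TO_PROTO_HELPER(object, proto_type) \
  proto_type proto;                         \
  (object).to_proto(&proto);                \
  return proto

Under that assumption, each overload above reduces to naming the matching proto type for its argument.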
4 changes: 2 additions & 2 deletions paddle/phi/core/distributed/auto_parallel/proto_helper.h
@@ -30,10 +30,10 @@ auto_parallel::TensorDistAttrProto to_proto(const TensorDistAttr& dist_attr);
 auto_parallel::ProcessMeshProto to_proto(const ProcessMesh& dist_attr);
 
 auto_parallel::DeviceCapabilityProto to_proto(
-    const auto_parallel::DeviceCapability& device_capibilty);
+    const auto_parallel::DeviceCapability& device_capability);
 auto_parallel::DeviceProto to_proto(const auto_parallel::Device& device);
 auto_parallel::LinkCapabilityProto to_proto(
-    const auto_parallel::LinkCapability& link_capibilty);
+    const auto_parallel::LinkCapability& link_capability);
 auto_parallel::LinkProto to_proto(const auto_parallel::Link& link);
 auto_parallel::DeviceMeshProto to_proto(const auto_parallel::DeviceMesh& link);
 auto_parallel::DistributedMapperProto to_proto(
@@ -228,7 +228,7 @@ void SameNdMeshReshardFunction::Eval(phi::DeviceContext* dev_ctx,
     bool is_partial = in_partial_status.count(out_mesh_axis) != 0;
 
     VLOG(3) << "Step4: out_mesh axis : " << out_mesh_axis
-            << "; paratial state :" << is_partial;
+            << "; partial state :" << is_partial;
     // 4.1 Calculate the dist_attr after this transform
     TensorDistAttr real_out_dist_attr(out->dist_attr());
     std::vector<int64_t> real_dims_mapping =
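For context on the membership test feeding this log line: partial status is looked up per output mesh axis. A minimal, self-contained illustration of that `count`-based check (types and values invented for the sketch):

#include <cstdint>
#include <iostream>
#include <map>

int main() {
  // Sketch: partial status keyed by mesh axis, mirroring the
  // in_partial_status.count(out_mesh_axis) test in the hunk above.
  std::map<int64_t, int> in_partial_status{{0, 0}};  // axis 0 is partial
  for (int64_t out_mesh_axis : {0, 1}) {
    bool is_partial = in_partial_status.count(out_mesh_axis) != 0;
    std::cout << "out_mesh axis : " << out_mesh_axis
              << "; partial state :" << is_partial << "\n";
  }
}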
@@ -91,7 +91,7 @@ void SameStatusReshardFunction::Eval(phi::DeviceContext* dev_ctx,
     if (src == cur_global_rank) {
       VLOG(3) << "Send from src " << src << " to dst " << dst;
       int64_t dst_local_rank = GetLocalRankInParticipate(all_process_ids, dst);
-      // Sice send kernel only has input, so we don't need to infermeta
+      // Since send kernel only has input, so we don't need to infermeta
       // actually. According to this reason, just use the kernel directly.
       RESHARD_FUNCTOR_WITH_COMM(dev_ctx,
                                 PSendKernel,
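The fixed comment's reasoning: shape and dtype inference (infermeta) describes a kernel's outputs, and a point-to-point send has none, so the kernel can be dispatched directly. A toy, self-contained model of that reasoning, not Paddle's API:

#include <cstdio>
#include <vector>

// Illustrative only: a send-like op consumes its input and produces no
// output tensor, so there is no output metadata to infer beforehand.
void SendLike(const std::vector<float>& x, int peer) {
  // No InferMeta step: nothing about an output exists to compute.
  std::printf("sending %zu floats to rank %d\n", x.size(), peer);
}

int main() { SendLike({1.0f, 2.0f, 3.0f}, /*peer=*/1); }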
4 changes: 2 additions & 2 deletions paddle/phi/core/sparse_coo_tensor.h
@@ -127,7 +127,7 @@ class SparseCooTensor : public TensorBase,
 
   /// \brief Test whether the non_zero_elements_ storage is allocated.
   /// In special cases, when nnz=0, non_zero_elements_ will not need to be
-  /// initialized, but it is neccessary to return true here, otherwise the
+  /// initialized, but it is necessary to return true here, otherwise the
   /// gradient will be None. return Whether the non_zero_elements_ storage is
   /// allocated.
   bool initialized() const override {

@@ -189,7 +189,7 @@ class SparseCooTensor : public TensorBase,
   /// \brief get the sparse dim
   int32_t sparse_dim() const;
 
-  /// \brief get the dnese dim
+  /// \brief get the dense dim
   int32_t dense_dim() const;
 
   /// \brief Returns the meta information of the tensor.
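The fixed comment encodes a subtle contract: a sparse tensor with nnz == 0 may skip allocating non_zero_elements_ yet must still report itself as initialized, or autograd would surface a None gradient. A hedged, self-contained model of that contract (the real body is hidden by the diff; everything below is an assumption):

#include <cstdint>

// Minimal model of the contract described above, illustrative only:
// an empty sparse tensor must still count as initialized.
struct Storage {
  bool allocated = false;
  bool initialized() const { return allocated; }
};

struct SparseCooLike {
  Storage non_zero_elements_;
  std::int64_t nnz_ = 0;
  std::int64_t nnz() const { return nnz_; }

  // True when storage exists, or when nnz == 0, so a legitimately
  // empty tensor does not surface as a None gradient.
  bool initialized() const {
    return non_zero_elements_.initialized() || nnz() == 0;
  }
};

int main() {
  SparseCooLike empty;                  // nnz == 0, no storage allocated
  return empty.initialized() ? 0 : 1;   // still reports initialized
}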