From ba51ce3a7218b66c07f266ad35f7e6a120918275 Mon Sep 17 00:00:00 2001
From: enkilee
Date: Thu, 9 Jan 2025 10:07:10 +0800
Subject: [PATCH] fix

---
 _typos.toml                                            | 11 -----------
 .../hlir/dialect/operator/ir/generate_shape_util.cc    |  8 ++++----
 paddle/cinn/ir/utils/ir_copy.cc                        |  4 ++--
 .../fluid/framework/fleet/heter_ps/heter_comm_inl.h    |  2 +-
 .../fluid/inference/tensorrt/convert/op_converter.h    |  2 +-
 paddle/fluid/pir/utils/general_functions.cc            |  2 +-
 paddle/fluid/pir/utils/general_functions.h             |  2 +-
 paddle/phi/infermeta/fusion.cc                         |  2 +-
 .../cutlass_extensions/gemm/threadblock/dq_mma_base.h  |  2 +-
 .../memory_efficient_attention/gemm/custom_mma_base.h  |  2 +-
 .../memory_efficient_attention/gemm/mma_from_smem.h    |  2 +-
 paddle/pir/src/core/op_operand_impl.cc                 |  2 +-
 paddle/pir/src/core/program.cc                         |  2 +-
 paddle/pir/src/core/value_impl.cc                      |  4 ++--
 paddle/pir/src/core/value_impl.h                       | 10 +++++-----
 .../distributed/auto_parallel/static/pir_pass.py       |  2 +-
 python/paddle/optimizer/optimizer.py                   |  4 ++--
 test/cpp/pir/core/paddle_fatal_test.cc                 |  4 ++--
 18 files changed, 28 insertions(+), 39 deletions(-)

diff --git a/_typos.toml b/_typos.toml
index cfd08daf4c29e..9d905a4952152 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -65,19 +65,8 @@ insid = 'insid'
 insepection = 'insepection'
 intall = 'intall'
 instanciate = 'instanciate'
-occured = 'occured'
-Ocurred = 'Ocurred'
-occures = 'occures'
-offets = 'offets'
-offseted = 'offseted'
 Operants = 'Operants'
 operants = 'operants'
-oeprations = 'oeprations'
-Opeartion = 'Opeartion'
-operaion = 'operaion'
-oppotunity = 'oppotunity'
-optimzed = 'optimzed'
-optmize = 'optmize'
 optin = 'optin'
 Optin = 'Optin'
 rder = 'rder'
diff --git a/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc b/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc
index 1844a0e7ed661..4f5fb5ee67c5f 100644
--- a/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc
+++ b/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc
@@ -587,7 +587,7 @@ std::vector GetMinimalInputs(
     const std::vector& input_tensors) {
   std::unordered_set handled_dim_exprs;
   std::unordered_set first_occurred_input_tensors;
-  auto TryCollectFirstOcurredInput_tensor =
+  auto TryCollectFirstOccurredInput_tensor =
       [&](pir::Value input_tensor,
           const std::vector& dim_exprs) {
         for (const auto& dim_expr : dim_exprs) {
@@ -601,11 +601,11 @@ std::vector GetMinimalInputs(
     const auto& shape_or_data_dim_exprs =
         ShapeOrDataDimExprs4Value(input_tensor);
     if (shape_or_data_dim_exprs.data().has_value()) {
-      TryCollectFirstOcurredInput_tensor(
+      TryCollectFirstOccurredInput_tensor(
           input_tensor, shape_or_data_dim_exprs.data().value());
     }
-    TryCollectFirstOcurredInput_tensor(input_tensor,
-                                       shape_or_data_dim_exprs.shape());
+    TryCollectFirstOccurredInput_tensor(input_tensor,
+                                        shape_or_data_dim_exprs.shape());
   }
   std::vector ret{};
   ret.reserve(input_tensors.size());
diff --git a/paddle/cinn/ir/utils/ir_copy.cc b/paddle/cinn/ir/utils/ir_copy.cc
index 36270adb26352..f9ca7c0217307 100644
--- a/paddle/cinn/ir/utils/ir_copy.cc
+++ b/paddle/cinn/ir/utils/ir_copy.cc
@@ -273,7 +273,7 @@ struct IRCopyVisitor : public ir::IRVisitorRequireReImpl {
     auto domain = Visit(op->domain);
     auto buffer_expr = Expr(op->buffer);
     // TODO(Superjomn) copy the operation.
-    auto operaion = op->operation;
+    auto operation = op->operation;
     auto name = op->name;
     auto tensor = make_shared<_Tensor_>();
 
@@ -289,7 +289,7 @@ struct IRCopyVisitor : public ir::IRVisitorRequireReImpl {
     tensor->domain = domain;
     tensor->shape = shape;
     tensor->reduce_axis = op->reduce_axis;
-    tensor->operation = operaion;
+    tensor->operation = operation;
     tensor->name = name;
     tensor->set_type(op->type());
     tensor->axis_ = op->axis_;
diff --git a/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h b/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h
index 2524c86eb89a4..e87b145ef9334 100644
--- a/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h
+++ b/paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h
@@ -3141,7 +3141,7 @@ void HeterComm::
   // local use end scatter(len is node num), reote use before scatter(len is
   // pull size) recompute offsets and parts
-  VLOG(2) << "begin recalc local and remote size and offets";
+  VLOG(2) << "begin recalc local and remote size and offsets";
   for (int i = 0; i < node_size_; i++) {
     size_t local_size = 0;
     size_t remote_size = 0;
diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h
index bae972efce777..094233f80b996 100644
--- a/paddle/fluid/inference/tensorrt/convert/op_converter.h
+++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -818,7 +818,7 @@ class OpConverter {
       //     layer->getOutput(i)->getDimensions().nbDims,
       //     0,
       //     common::errors::InvalidArgument(
-      //         "Error occures in Paddle-TRT layer with output name: %s",
+      //         "Error occurred in Paddle-TRT layer with output name: %s",
       //         output_tensor_names[i].c_str()));
     }
     layer->setName((layer_name + ")").c_str());
diff --git a/paddle/fluid/pir/utils/general_functions.cc b/paddle/fluid/pir/utils/general_functions.cc
index dc030989dc308..b0473ca7be642 100644
--- a/paddle/fluid/pir/utils/general_functions.cc
+++ b/paddle/fluid/pir/utils/general_functions.cc
@@ -139,7 +139,7 @@ pir::Type TranslateToIrDataType(phi::DataType dtype) {
   return data_type;
 }
 
-pir::Operation* CreateOpeartionByName(const std::string& op_name,
+pir::Operation* CreateOperationByName(const std::string& op_name,
                                       const std::vector& inputs,
                                       const pir::AttributeMap& attrs,
                                       const pir::PatternRewriter& rewriter) {
diff --git a/paddle/fluid/pir/utils/general_functions.h b/paddle/fluid/pir/utils/general_functions.h
index d63b3e089f199..f1b5c72caf526 100644
--- a/paddle/fluid/pir/utils/general_functions.h
+++ b/paddle/fluid/pir/utils/general_functions.h
@@ -96,7 +96,7 @@ pir::Type TranslateToIrDataType(phi::DataType dtype);
  *
  * @return pir::Operation*
  */
-pir::Operation* CreateOpeartionByName(const std::string& op_name,
+pir::Operation* CreateOperationByName(const std::string& op_name,
                                       const std::vector& inputs,
                                       const pir::AttributeMap& attrs,
                                       const pir::PatternRewriter& rewriter);
diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc
index a76507ed3e666..6e4b6ae76aa8c 100644
--- a/paddle/phi/infermeta/fusion.cc
+++ b/paddle/phi/infermeta/fusion.cc
@@ -4398,7 +4398,7 @@ void CrossAttentionXPUInferMeta(
       phi::make_ddim({input_q_dims[0], input_q_dims[1], head_num * head_dim}));
   qkv->set_dtype(out_dtype);
   qkv->set_layout(input_q.layout());
-  // TODO(Terry) optmize the max value num
+  // TODO(Terry) optimize the max value num
   // unable to pass few PR-CIs, so just use a constant value
   // int xpu2_max_value_num = phi::backends::xpu::get_xpu_max_ptr_size(-1);
   const int xpu2_max_value_num = 6;
diff --git a/paddle/phi/kernels/fusion/cutlass/cutlass_extensions/gemm/threadblock/dq_mma_base.h b/paddle/phi/kernels/fusion/cutlass/cutlass_extensions/gemm/threadblock/dq_mma_base.h
index 7932921d69434..53fffb6c80c3d 100644
--- a/paddle/phi/kernels/fusion/cutlass/cutlass_extensions/gemm/threadblock/dq_mma_base.h
+++ b/paddle/phi/kernels/fusion/cutlass/cutlass_extensions/gemm/threadblock/dq_mma_base.h
@@ -114,7 +114,7 @@ class DqMmaBase {
                              Shape::kN / WarpGemm::kN,
                              Shape::kK / WarpGemm::kK>;
 
-  /// Number of warp-level GEMM oeprations
+  /// Number of warp-level GEMM operations
   static int const kWarpGemmIterations =
       (WarpGemm::kK / Operator::Policy::MmaShape::kK);
 
diff --git a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/custom_mma_base.h b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/custom_mma_base.h
index 70c5210f41b15..1256856020934 100644
--- a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/custom_mma_base.h
+++ b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/custom_mma_base.h
@@ -106,7 +106,7 @@ class CustomMmaBase {
                              Shape::kN / WarpGemm::kN,
                              Shape::kK / WarpGemm::kK>;
 
-  /// Number of warp-level GEMM oeprations
+  /// Number of warp-level GEMM operations
   static int const kWarpGemmIterations =
       (WarpGemm::kK / Operator::Policy::MmaShape::kK);
 
diff --git a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/mma_from_smem.h b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/mma_from_smem.h
index be40e2b2b9d40..d165a4e9ceedc 100644
--- a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/mma_from_smem.h
+++ b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/gemm/mma_from_smem.h
@@ -179,7 +179,7 @@ class MmaBaseFromSharedMemory {
                              Shape::kK / WarpGemm::kK>;
   using WarpCount1 = WarpCount;
 
-  /// Number of warp-level GEMM oeprations
+  /// Number of warp-level GEMM operations
   static int const kWarpGemmIterations =
       (WarpGemm::kK / Operator::Policy::MmaShape::kK);
   static int const kWarpGemmIterations1 = kWarpGemmIterations;
diff --git a/paddle/pir/src/core/op_operand_impl.cc b/paddle/pir/src/core/op_operand_impl.cc
index 6977e1278df56..7b1e018f3e359 100644
--- a/paddle/pir/src/core/op_operand_impl.cc
+++ b/paddle/pir/src/core/op_operand_impl.cc
@@ -61,7 +61,7 @@ void OpOperandImpl::RemoveFromUdChain() {
   if (!source_) return;
   if (!prev_use_addr_) return;
   if (prev_use_addr_ == source_.impl()->first_use_addr()) {
-    /// NOTE: In ValueImpl, first_use_offseted_by_index_ use lower three bits
+    /// NOTE: In ValueImpl, first_use_offsetted_by_index_ use lower three bits
     /// storage index information, so need to be updated using the set_first_use
    /// method here.
     source_.impl()->set_first_use(next_use_);
diff --git a/paddle/pir/src/core/program.cc b/paddle/pir/src/core/program.cc
index e55dd6eedbe43..eda1924824db3 100644
--- a/paddle/pir/src/core/program.cc
+++ b/paddle/pir/src/core/program.cc
@@ -47,7 +47,7 @@ int64_t GetUniqueRandomId() {
       return random_id;
     }
   }
-  LOG(FATAL) << "Fatal bug occured in GetUniqueRandomId().";
+  LOG(FATAL) << "Fatal bug occurred in GetUniqueRandomId().";
 }
 
 }  // namespace
diff --git a/paddle/pir/src/core/value_impl.cc b/paddle/pir/src/core/value_impl.cc
index 0cde931947e8d..3a4cbcc7a1ad1 100644
--- a/paddle/pir/src/core/value_impl.cc
+++ b/paddle/pir/src/core/value_impl.cc
@@ -28,7 +28,7 @@ namespace pir::detail {
 void ValueImpl::set_first_use(OpOperandImpl *first_use) {
   uint32_t offset = kind();
   uintptr_t ptr = reinterpret_cast(first_use) + offset;
-  first_use_offseted_by_kind_ = reinterpret_cast(ptr);
+  first_use_offsetted_by_kind_ = reinterpret_cast(ptr);
   VLOG(10) << "The index of this value is: " << offset
            << ". The address of this value is: " << this
            << ". This value first use is: " << first_use << ".";
@@ -58,7 +58,7 @@ ValueImpl::ValueImpl(Type type, uint32_t kind) : id_(GenerateId()) {
                        kind));
   type_ = type;
   uintptr_t ptr = reinterpret_cast(nullptr) + kind;
-  first_use_offseted_by_kind_ = reinterpret_cast(ptr);
+  first_use_offsetted_by_kind_ = reinterpret_cast(ptr);
   VLOG(10) << "Construct a ValueImpl whose's kind is " << kind
            << ". The value_impl address is: " << this;
 }
diff --git a/paddle/pir/src/core/value_impl.h b/paddle/pir/src/core/value_impl.h
index 0c9cfb4e8c0f3..1eee9393caddf 100644
--- a/paddle/pir/src/core/value_impl.h
+++ b/paddle/pir/src/core/value_impl.h
@@ -44,12 +44,12 @@ class alignas(8) ValueImpl {
   OpOperandImpl *first_use() const {
     return reinterpret_cast(
-        reinterpret_cast(first_use_offseted_by_kind_) & (~0x07));
+        reinterpret_cast(first_use_offsetted_by_kind_) & (~0x07));
   }
 
   void set_first_use(OpOperandImpl *first_use);
 
-  OpOperandImpl **first_use_addr() { return &first_use_offseted_by_kind_; }
+  OpOperandImpl **first_use_addr() { return &first_use_offsetted_by_kind_; }
 
   bool use_empty() const { return first_use() == nullptr; }
 
@@ -60,10 +60,10 @@ class alignas(8) ValueImpl {
   std::string PrintUdChain();
 
   ///
-  /// \brief Interface functions of "first_use_offseted_by_kind_" attribute.
+  /// \brief Interface functions of "first_use_offsetted_by_kind_" attribute.
   ///
   uint32_t kind() const {
-    return reinterpret_cast(first_use_offseted_by_kind_) & 0x07;
+    return reinterpret_cast(first_use_offsetted_by_kind_) & 0x07;
   }
 
   template
@@ -93,7 +93,7 @@ class alignas(8) ValueImpl {
   /// output(OpInlineResultImpl); (2) index = 6: represent the position >=6
   /// outline output(OpOutlineResultImpl); (3) index = 7 is reserved.
   ///
-  OpOperandImpl *first_use_offseted_by_kind_ = nullptr;
+  OpOperandImpl *first_use_offsetted_by_kind_ = nullptr;
 
   const uint64_t id_ = 0;
 };
diff --git a/python/paddle/distributed/auto_parallel/static/pir_pass.py b/python/paddle/distributed/auto_parallel/static/pir_pass.py
index 958c0c112109b..b202faf5defd5 100644
--- a/python/paddle/distributed/auto_parallel/static/pir_pass.py
+++ b/python/paddle/distributed/auto_parallel/static/pir_pass.py
@@ -231,7 +231,7 @@ class ReshardPasses:
 
     @staticmethod
     def decompose_reshard_pass(dist_program):
-        # split composed reshard op into atomic reshard ops, which would increase the oppotunity of reshard Re-Use in following fold_reshard_pass.
+        # split composed reshard op into atomic reshard ops, which would increase the opportunity of reshard Re-Use in following fold_reshard_pass.
         del_ops = []
         for op in dist_program.global_block().ops:
             if op.name() != 'dist_op.reshard':
diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index 82bf8890a59c1..74e65015276e5 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -2008,7 +2008,7 @@ def _add_param_group(self, param_group):
         Add a param group to parameter_list.
 
         Args:
-            param_group (dict): The group of Tensors to be optimzed with
+            param_group (dict): The group of Tensors to be optimized with
             different optimization options.
         """
         params = param_group['params']
@@ -2052,7 +2052,7 @@ def _update_param_group(self, parameters):
         """
         Update the param group with new entry
         Args:
-            parameters (dict): The extra group of Tensors to be optimzed with
+            parameters (dict): The extra group of Tensors to be optimized with
             different optimization options. Only used in child class.
         """
         pass
diff --git a/test/cpp/pir/core/paddle_fatal_test.cc b/test/cpp/pir/core/paddle_fatal_test.cc
index e04ef076ca101..83943d24a0822 100644
--- a/test/cpp/pir/core/paddle_fatal_test.cc
+++ b/test/cpp/pir/core/paddle_fatal_test.cc
@@ -20,7 +20,7 @@ class FatalClass {
  public:
   FatalClass() {}
-  ~FatalClass() { PADDLE_FATAL("fatal occured in deconstructor!"); }
+  ~FatalClass() { PADDLE_FATAL("fatal occurred in deconstructor!"); }
 };
 
 void throw_exception_in_func() {
@@ -32,7 +32,7 @@ void terminate_in_func() { FatalClass test_case; }
 
 TEST(paddle_fatal_test, base) {
   EXPECT_FALSE(::common::enforce::IsPaddleFatalSkip());
-  EXPECT_DEATH(terminate_in_func(), "fatal occured in deconstructor!.*");
+  EXPECT_DEATH(terminate_in_func(), "fatal occurred in deconstructor!.*");
   EXPECT_THROW(throw_exception_in_func(), common::enforce::EnforceNotMet);
   EXPECT_FALSE(::common::enforce::IsPaddleFatalSkip());
   ::common::enforce::SkipPaddleFatal(true);
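Reviewer note: the renamed lambda in GetMinimalInputs implements a "first occurrence wins" filter: an input tensor is kept only if it is the first one to contribute at least one dim expr that has not been seen yet. The snippet below is a minimal standalone sketch of that idea only; the element types and names are simplified stand-ins, not the CINN API.

    #include <string>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    // Keep an input only if it is the first to contribute a new "dim expr".
    // Strings stand in for symbol::DimExpr; ints stand in for pir::Value.
    std::vector<int> GetMinimalInputsSketch(
        const std::vector<std::pair<int, std::vector<std::string>>>& inputs) {
      std::unordered_set<std::string> handled_dim_exprs;
      std::unordered_set<int> first_occurred_inputs;
      for (const auto& item : inputs) {
        for (const auto& dim_expr : item.second) {
          // insert(...).second is true only the first time this expr is seen.
          if (handled_dim_exprs.insert(dim_expr).second) {
            first_occurred_inputs.insert(item.first);
          }
        }
      }
      std::vector<int> ret;
      ret.reserve(inputs.size());
      for (const auto& item : inputs) {  // preserve the original input order
        if (first_occurred_inputs.count(item.first)) ret.push_back(item.first);
      }
      return ret;
    }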
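Reviewer note: the renamed member `first_use_offsetted_by_kind_` relies on pointer tagging. Because ValueImpl is declared `alignas(8)` and OpOperandImpl addresses are 8-byte aligned, the low three bits of the first-use pointer are always zero, so a 3-bit kind (0-5 inline result position, 6 outline result, 7 reserved) can be stored there and read back with the `& 0x07` / `& (~0x07)` masks seen in the hunks above. A self-contained sketch of that tagging scheme, using hypothetical names rather than Paddle's classes:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    struct alignas(8) Use {};  // stand-in for OpOperandImpl; 8-byte aligned

    // Pack an aligned pointer and a 3-bit kind into one word. The alignment
    // guarantees the pointer's low three bits are zero, so adding the kind
    // never disturbs the pointer bits.
    uintptr_t Pack(Use* first_use, uint32_t kind) {
      assert(kind < 8);
      return reinterpret_cast<uintptr_t>(first_use) + kind;
    }

    Use* PointerOf(uintptr_t packed) {
      return reinterpret_cast<Use*>(packed & ~static_cast<uintptr_t>(0x07));
    }

    uint32_t KindOf(uintptr_t packed) { return packed & 0x07; }

    int main() {
      Use use;
      uintptr_t packed = Pack(&use, 6);                // e.g. an outline result
      std::cout << (PointerOf(packed) == &use) << ' '  // 1: pointer recovered
                << KindOf(packed) << '\n';             // 6: kind recovered
      return 0;
    }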