diff --git a/paddle/cinn/ir/schedule/schedule_base.cc b/paddle/cinn/ir/schedule/schedule_base.cc
index b34221d73f052..885391aecd073 100644
--- a/paddle/cinn/ir/schedule/schedule_base.cc
+++ b/paddle/cinn/ir/schedule/schedule_base.cc
@@ -105,7 +105,7 @@ void ScheduleBase::Broadcast(const std::string& block_name,
   }
   std::vector<Expr> all_loops = this->GetLoops(block_name);
   if (axes[0] >= all_loops.size()) {
-    throw std::runtime_error("axes execeed loop size");
+    throw std::runtime_error("axes exceed loop size");
   }

   // Get Last loop
@@ -150,14 +150,14 @@ void ScheduleBase::Broadcast(const std::string& block_name,

   auto stride = Expr(1);
   auto in_offset = Expr(0);
-  std::set<int64_t> brodacast_set(info.broadcast_axes.begin(),
+  std::set<int64_t> broadcast_set(info.broadcast_axes.begin(),
                                   info.broadcast_axes.end());
   for (int i = all_loops.size() - 1; i >= 0; --i) {
     auto loop_temp = all_loops[i].As<ir::For>();
     offset = offset + loop_temp->loop_var * stride;
     stride = stride * loop_temp->extent;
-    if (!brodacast_set.count(i)) {
+    if (!broadcast_set.count(i)) {
       in_offset = in_offset + loop_temp->loop_var * stride;
     }
   }
diff --git a/paddle/cinn/pybind/frontend.cc b/paddle/cinn/pybind/frontend.cc
index b6ba2590f3dad..b21ae95cd9629 100644
--- a/paddle/cinn/pybind/frontend.cc
+++ b/paddle/cinn/pybind/frontend.cc
@@ -937,7 +937,7 @@ void BindFrontend(pybind11::module *m) {
       .def("get_cinn_name",
            [](PaddleModelConvertor &self, const std::string &paddle_name) {
              CHECK(self.var_model_to_program_map().count(paddle_name))
-                 << "Cannot find variabel " << paddle_name
+                 << "Cannot find variable " << paddle_name
                  << " in CINN! Please check.";
              return self.var_model_to_program_map().at(paddle_name);
            });
diff --git a/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc b/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
index 5e2be03108294..2d7326f825acc 100644
--- a/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
+++ b/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
@@ -39,7 +39,7 @@ void ComputeInterceptor::PrepareDeps() {
     for (int64_t i = 0; i < node_->max_run_times(); ++i) {
       ready_size_map.emplace(i, 0);
     }
-    in_readys_.emplace(up.first, std::make_pair(up.second, ready_size_map));
+    in_readies_.emplace(up.first, std::make_pair(up.second, ready_size_map));
   }
   for (auto down : downstream) {
     out_buffs_.emplace(down.first, std::make_pair(down.second, 0));
   }
@@ -106,11 +106,11 @@ InterceptorMessage ComputeInterceptor::PrepareVarsMsg() {
 }

 void ComputeInterceptor::IncreaseReady(int64_t up_id, int64_t scope_id) {
-  auto it = in_readys_.find(up_id);
+  auto it = in_readies_.find(up_id);
   PADDLE_ENFORCE_NE(it,
-                    in_readys_.end(),
+                    in_readies_.end(),
                     platform::errors::NotFound(
-                        "Cannot find upstream=%lld in in_readys.", up_id));
+                        "Cannot find upstream=%lld in in_readies.", up_id));

   auto max_ready_size = it->second.first;
   const auto& ready_scope_map = it->second.second;
@@ -171,7 +171,7 @@ bool ComputeInterceptor::IsInputReady() {
   for (int64_t i = start_micro_step; i < start_micro_step + num_micro_step;
        ++i) {
     bool flag = true;
-    for (auto& ins : in_readys_) {
+    for (auto& ins : in_readies_) {
       auto ready_size_map = ins.second.second;
       flag = flag && (ready_size_map.at(i) != 0);
     }
@@ -268,7 +268,7 @@ void ComputeInterceptor::SendDataReadyToDownStream() {
 }

 void ComputeInterceptor::ReplyCompletedToUpStream() {
-  for (auto& ins : in_readys_) {
+  for (auto& ins : in_readies_) {
     auto up_id = ins.first;
     auto ready_size = ins.second.second.at(cur_scope_id_);
     ready_size -= 1;
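
For reference while reading the in_readys_ -> in_readies_ rename above: the renamed member is a per-upstream bookkeeping map, upstream_id -> (max_ready_size, scope_id -> ready_size), which the interceptor polls before running a micro-step. The sketch below only illustrates that bookkeeping pattern under assumed, hypothetical names; it is not the ComputeInterceptor implementation.

#include <cstdint>
#include <map>
#include <utility>

// Assumed shape of the bookkeeping: upstream_id -> (max_ready_size, scope_id -> ready_size).
using ReadyMap =
    std::map<int64_t, std::pair<int64_t, std::map<int64_t, int64_t>>>;

// Record one more ready input from upstream `up_id` for micro-step `scope_id`.
void IncreaseReadyFor(ReadyMap* in_readies, int64_t up_id, int64_t scope_id) {
  auto it = in_readies->find(up_id);
  if (it == in_readies->end()) return;  // the real code raises a NotFound error here
  it->second.second[scope_id] += 1;
}

// A micro-step can run only when every upstream has sent at least one ready input for it.
bool IsScopeReady(const ReadyMap& in_readies, int64_t scope_id) {
  for (const auto& entry : in_readies) {
    const std::map<int64_t, int64_t>& ready_per_scope = entry.second.second;
    auto found = ready_per_scope.find(scope_id);
    if (found == ready_per_scope.end() || found->second == 0) return false;
  }
  return true;
}
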
diff --git a/paddle/fluid/distributed/fleet_executor/compute_interceptor.h b/paddle/fluid/distributed/fleet_executor/compute_interceptor.h
index 26205d5ac8264..bb26c62061734 100644
--- a/paddle/fluid/distributed/fleet_executor/compute_interceptor.h
+++ b/paddle/fluid/distributed/fleet_executor/compute_interceptor.h
@@ -41,7 +41,7 @@ class ComputeInterceptor : public Interceptor {
   // upstream_id-->(max_ready_size, scope-->ready_size)
   std::map<int64_t, std::pair<int64_t, std::map<int64_t, int64_t>>>
-      in_readys_{};
+      in_readies_{};
   // downstream_id-->(max_buffer_size, used_size)
   std::map<int64_t, std::pair<int64_t, int64_t>> out_buffs_{};
diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
index 0e147243093bd..64950443c0efc 100644
--- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
+++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
@@ -1,6 +1,6 @@
 add_subdirectory(generator)

-set(EAGER_GENERETOR_DEPS
+set(EAGER_GENERATOR_DEPS
     ${GLOB_OP_LIB}
     ${GLOB_OPERATOR_DEPS}
     pybind
@@ -13,12 +13,12 @@ set(EAGER_GENERETOR_DEPS
     imperative_flag)

 if(WITH_CUSTOM_DEVICE)
-  set(EAGER_GENERETOR_DEPS ${EAGER_GENERETOR_DEPS}
+  set(EAGER_GENERATOR_DEPS ${EAGER_GENERATOR_DEPS}
       custom_device_common_op_registry)
 endif()

 add_executable(eager_generator eager_generator.cc)
-target_link_libraries(eager_generator ${EAGER_GENERETOR_DEPS})
+target_link_libraries(eager_generator ${EAGER_GENERATOR_DEPS})

 get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES)
 target_link_libraries(eager_generator ${os_dependency_modules})
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index 063d86c028c49..c272e09a9579f 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -292,7 +292,7 @@ class {} : public egr::GradNodeBase {{
   // Forward API Call
 {}
-  // Log memory infomation
+  // Log memory information
 {}
   // Check NaN and Inf if needed
 {}
@@ -346,7 +346,7 @@ class {} : public egr::GradNodeBase {{
 {}
   // Forward API Call
 {}
-  // Log memory infomation
+  // Log memory information
 {}
   // Check NaN and Inf if needed
 {}
@@ -538,8 +538,8 @@ class {} : public egr::GradNodeBase {{
 """

 TYPE_PROMOTION_LOGIC_TEMPLATE = """   if (phi::NeedTypePromotion({x}.dtype(), {y}.dtype())) {{
-    VLOG(5) << "got different data type, run type protmotion automatically.";
-    LOG_FIRST_N(WARNING, 1) << "got different data type, run type protmotion automatically, this may cause data type been changed.";
+    VLOG(5) << "got different data type, run type promotion automatically.";
+    LOG_FIRST_N(WARNING, 1) << "got different data type, run type promotion automatically, this may cause data type been changed.";
     {op_name}
     auto promotion_type = phi::GetPromoteDtype(op_name, {x}.dtype(), {y}.dtype());
diff --git a/paddle/fluid/operators/controlflow/depend_op.cc b/paddle/fluid/operators/controlflow/depend_op.cc
index 925990ba3ba5f..5755f0079b486 100644
--- a/paddle/fluid/operators/controlflow/depend_op.cc
+++ b/paddle/fluid/operators/controlflow/depend_op.cc
@@ -51,7 +51,7 @@ class DependOp : public framework::OperatorBase {
     PADDLE_ENFORCE_EQ(x_name,
                       out_name,
                       platform::errors::PreconditionNotMet(
-                          "Input(X) and Output(Out) varibale should be the "
+                          "Input(X) and Output(Out) variable should be the "
                           "same, but got Input is %s and Output is %s.",
                           x_name,
                           out_name));
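
The TYPE_PROMOTION_LOGIC_TEMPLATE hunk in eager_gen.py above only fixes a log message, but the guard it generates is easy to misread out of context. The stand-alone sketch below mirrors the shape of that generated guard using toy stand-ins for the phi helpers; the DType enum and its ranking are assumptions for illustration and do not reproduce phi::GetPromoteDtype's real promotion rules.

#include <algorithm>
#include <iostream>
#include <string>

// Toy stand-ins for phi::NeedTypePromotion / phi::GetPromoteDtype (illustrative only).
enum class DType { kBool, kInt32, kInt64, kFloat32, kFloat64 };

bool NeedTypePromotion(DType x, DType y) { return x != y; }

DType GetPromoteDtype(const std::string& /*op_name*/, DType x, DType y) {
  return std::max(x, y);  // promote to the "wider" dtype in this toy ranking
}

int main() {
  DType x = DType::kInt64;
  DType y = DType::kFloat32;
  if (NeedTypePromotion(x, y)) {
    // The generated code would cast both inputs to `promoted` before the kernel call.
    DType promoted = GetPromoteDtype("add", x, y);
    std::cout << "promote both operands to dtype " << static_cast<int>(promoted)
              << std::endl;  // prints 3 (kFloat32 in this toy enum)
  }
  return 0;
}
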
diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml
index 1200a8d0c94f6..fd715c4e0d815 100644
--- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml
+++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml
@@ -1,7 +1,7 @@
 # The operators included in this file are:
 # 1) Operators defined only in PIR, dynamic graphs do not exist;
 # 2) The definitions of static graphs and dynamic graphs are inconsistent, but the final definition plan has not yet been clarified.
-# After the definition is clearly defined, migrate to paddle /fluid/pir/dialect/operator/ir/update_ops.yaml or paddle/phi/api/yaml/ops.yaml
+# After the definition is clearly defined, migrate to paddle/fluid/pir/dialect/operator/ir/update_ops.yaml or paddle/phi/api/yaml/ops.yaml

 - op : adadelta_
   args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update, Tensor learning_rate, Tensor master_param, float rho, float epsilon, bool multi_precision)
diff --git a/paddle/phi/api/profiler/device_tracer.cc b/paddle/phi/api/profiler/device_tracer.cc
index e1c009fa9cad0..085d28220a6a9 100644
--- a/paddle/phi/api/profiler/device_tracer.cc
+++ b/paddle/phi/api/profiler/device_tracer.cc
@@ -834,7 +834,7 @@ uint32_t GetCurSystemThreadId() {
   return id;
 }

-void RecoreCurThreadId(uint64_t id) {
+void RecordCurThreadId(uint64_t id) {
   std::lock_guard<std::mutex> lock(system_thread_id_map_mutex);
   auto gid = GetCurSystemThreadId();
   system_thread_id_map[gid] = id;
diff --git a/paddle/phi/api/profiler/device_tracer.h b/paddle/phi/api/profiler/device_tracer.h
index bde73357f2075..a0f4b5c54670e 100644
--- a/paddle/phi/api/profiler/device_tracer.h
+++ b/paddle/phi/api/profiler/device_tracer.h
@@ -162,5 +162,5 @@ void ClearCurBlock();
 int BlockDepth();

 // Set current thread id, so we can map the system thread id to thread id.
-void RecoreCurThreadId(uint64_t id);
+void RecordCurThreadId(uint64_t id);
 }  // namespace phi
diff --git a/paddle/phi/api/profiler/profiler_helper.h b/paddle/phi/api/profiler/profiler_helper.h
index 31ccbbb12fb6f..16ae735fccc1e 100644
--- a/paddle/phi/api/profiler/profiler_helper.h
+++ b/paddle/phi/api/profiler/profiler_helper.h
@@ -73,7 +73,7 @@ inline EventList<Event> &GetEventList() {
     ProfilerHelper::g_thread_id = ProfilerHelper::g_next_thread_id++;
     ProfilerHelper::g_all_event_lists.emplace_front(
         ProfilerHelper::g_event_list);
-    RecoreCurThreadId(ProfilerHelper::g_thread_id);
+    RecordCurThreadId(ProfilerHelper::g_thread_id);
   }
   return *ProfilerHelper::g_event_list;
 }
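
On the RecoreCurThreadId -> RecordCurThreadId rename above: per the header comment, the function registers the current system thread under the profiler's logical thread id so that later events can be attributed to it. A minimal sketch of that pattern follows, with hypothetical names and with std::thread::id standing in for the numeric system thread id the real tracer uses.

#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_map>

// Hypothetical mapping: system thread -> profiler thread id.
static std::mutex g_thread_id_map_mutex;
static std::unordered_map<std::thread::id, uint64_t> g_thread_id_map;

// Analogue of RecordCurThreadId: remember which logical profiler thread id
// the calling system thread should be reported under.
void RecordCurThreadIdSketch(uint64_t profiler_thread_id) {
  std::lock_guard<std::mutex> lock(g_thread_id_map_mutex);
  g_thread_id_map[std::this_thread::get_id()] = profiler_thread_id;
}
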
diff --git a/paddle/pir/src/dialect/shape/utils/dim_expr_util.cc b/paddle/pir/src/dialect/shape/utils/dim_expr_util.cc
index 9549d66893228..8469e8edfa5f0 100644
--- a/paddle/pir/src/dialect/shape/utils/dim_expr_util.cc
+++ b/paddle/pir/src/dialect/shape/utils/dim_expr_util.cc
@@ -589,12 +589,12 @@ ConstRational MulConstRational(const ConstRational& lhs,
   const auto [lhs_num, lhs_dem] = lhs;
   const auto [rhs_num, rhs_dem] = rhs;
   // Crossing is correct.
-  const auto [simplifed_lhs_num, simplifed_rhs_dem] =
+  const auto [simplified_lhs_num, simplified_rhs_dem] =
       SimplifiedConstRational(lhs_num, rhs_dem);
-  const auto [simplifed_rhs_num, simplifed_lhs_dem] =
+  const auto [simplified_rhs_num, simplified_lhs_dem] =
       SimplifiedConstRational(rhs_num, lhs_dem);
-  return ConstRational{simplifed_lhs_num * simplifed_rhs_num,
-                       simplifed_lhs_dem * simplifed_rhs_dem};
+  return ConstRational{simplified_lhs_num * simplified_rhs_num,
+                       simplified_lhs_dem * simplified_rhs_dem};
 }

 template <>
diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py
index 8de6005681250..65834782bbe5f 100644
--- a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py
+++ b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py
@@ -144,7 +144,7 @@ def get_sparse_attrs():

             if len(dist_varnames) != 0:
                 raise ValueError(
-                    "GeoStrategy can not support large scale embeding now, please use paddle.static.nn.embedding"
+                    "GeoStrategy can not support large scale embedding now, please use paddle.static.nn.embedding"
                 )

             init_attrs = []
diff --git a/python/paddle/incubate/distributed/utils/io/dist_save.py b/python/paddle/incubate/distributed/utils/io/dist_save.py
index 05820ebf61987..d0fbc1e7d97f9 100644
--- a/python/paddle/incubate/distributed/utils/io/dist_save.py
+++ b/python/paddle/incubate/distributed/utils/io/dist_save.py
@@ -156,7 +156,7 @@ def save(state_dict, path, **configs):
         paddle.save(gathered_state_dict, path, **configs)
     except:
         raise RuntimeError(
-            f'''Saving failed. Follwing are some suggestions:
+            f'''Saving failed. Following are some suggestions:
   1) pass the param max_grouped_size to turn the grouped size smaller (current value of max_grouped_size is {max_size})
   2) if sharding stage is 1, use paddle.save rather than paddle.distributed.save
   3) Concat the developers
diff --git a/python/paddle/incubate/optimizer/pipeline.py b/python/paddle/incubate/optimizer/pipeline.py
index 5edb4897e6c13..446dfb26782aa 100644
--- a/python/paddle/incubate/optimizer/pipeline.py
+++ b/python/paddle/incubate/optimizer/pipeline.py
@@ -481,13 +481,13 @@ def _get_op_device_attr(self, op):
         if device:
             assert device[0:3] == 'gpu', (
                 "Now, only gpu devices are "
-                "supported in pipeline parallemism."
+                "supported in pipeline parallelism."
             )
         return device

     def _add_op_device_attr_for_op(self, op, idx, block):
         """
-        Add op_device attrribute for ops that have not that attribute set.
+        Add op_device attribute for ops that have not that attribute set.
         We use "gpu:all" to represent the op should be put on all
         sub-programs, such as lr-related ops. Note that: "gpu:all"
         is only used by pipeline as an indicator.
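
Returning to the MulConstRational hunk in dim_expr_util.cc above: the comment "Crossing is correct." refers to reducing each numerator against the other operand's denominator before multiplying, which gives the same result as reducing afterwards while keeping intermediates small. The self-contained sketch below demonstrates this; the pair-based ConstRational and the gcd-based SimplifiedConstRational are assumptions standing in for the definitions elsewhere in that file.

#include <cstdint>
#include <iostream>
#include <numeric>  // std::gcd
#include <utility>

// Assumed stand-in for ConstRational: (numerator, denominator).
using ConstRational = std::pair<int64_t, int64_t>;

// Assumed stand-in: divide out the greatest common divisor.
ConstRational SimplifiedConstRational(int64_t num, int64_t dem) {
  int64_t g = std::gcd(num, dem);
  return {num / g, dem / g};
}

// "Crossing": reduce lhs_num against rhs_dem and rhs_num against lhs_dem,
// then multiply the reduced parts.
ConstRational MulConstRationalSketch(const ConstRational& lhs,
                                     const ConstRational& rhs) {
  const auto [lhs_num, lhs_dem] = lhs;
  const auto [rhs_num, rhs_dem] = rhs;
  const auto [simplified_lhs_num, simplified_rhs_dem] =
      SimplifiedConstRational(lhs_num, rhs_dem);
  const auto [simplified_rhs_num, simplified_lhs_dem] =
      SimplifiedConstRational(rhs_num, lhs_dem);
  return {simplified_lhs_num * simplified_rhs_num,
          simplified_lhs_dem * simplified_rhs_dem};
}

int main() {
  // (6/35) * (21/4): gcd(6, 4) = 2 and gcd(21, 35) = 7, so the product is
  // (3 * 3) / (5 * 2) = 9/10 -- the same as reducing 126/140 after the fact.
  ConstRational r = MulConstRationalSketch({6, 35}, {21, 4});
  std::cout << r.first << "/" << r.second << std::endl;  // prints 9/10
  return 0;
}
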