From 3473ef6da5de4dcd94fc52af429c2f97359db7f8 Mon Sep 17 00:00:00 2001
From: enkilee
Date: Sun, 26 Jan 2025 09:50:31 +0800
Subject: [PATCH] fix

---
 _typos.toml                                   |  8 ----
 .../hlir/framework/pir/compilation_task.cc    |  2 +-
 .../fluid/framework/details/nccl_op_handle.h  |  2 +-
 .../pir/dialect/op_generator/op_build_gen.py  |  2 +-
 paddle/phi/kernels/gpu/unique_kernel.cu       |  2 +-
 .../transformer_dygraph_model.py              | 48 +++++++++----------
 ...st_sigmoid_cross_entropy_with_logits_op.py |  8 ++--
 tools/cinn/gen_c++_tutorial.py                |  2 +-
 tools/gen_ut_cmakelists.py                    |  4 +-
 tools/sampcd_processor.py                     | 10 ++--
 10 files changed, 40 insertions(+), 48 deletions(-)

diff --git a/_typos.toml b/_typos.toml
index 5b4af7ba2a5fd4..d32a657e6f85ec 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -60,14 +60,6 @@ ouput = 'ouput'
 outpout = 'outpout'
 ouptut = 'ouptut'
 Ouput = 'Ouput'
-precending = 'precending'
-Prepration = 'Prepration'
-Prepar = 'Prepar'
-preprocesser = 'preprocesser'
-priorites = 'priorites'
-probabalistic = 'probabalistic'
-processer = 'processer'
-proccess = 'proccess'
 producted = 'producted'
 progam = 'progam'
 progrss = 'progrss'
diff --git a/paddle/cinn/hlir/framework/pir/compilation_task.cc b/paddle/cinn/hlir/framework/pir/compilation_task.cc
index af418f4194d065..c0833a1dbdcf10 100644
--- a/paddle/cinn/hlir/framework/pir/compilation_task.cc
+++ b/paddle/cinn/hlir/framework/pir/compilation_task.cc
@@ -71,7 +71,7 @@ void GroupCompilationContext::PrepareModuleBuilder() {
   PADDLE_ENFORCE_EQ(predicates_.size(),
                     priorities_.size(),
                     ::common::errors::InvalidArgument(
-                        "The size of predicates and priorites should be "
+                        "The size of predicates and priorities should be "
                         "the same."));
   for (const ir::Expr& predicate : predicates_) {
     module_builder_.AddPredicate(predicate);
diff --git a/paddle/fluid/framework/details/nccl_op_handle.h b/paddle/fluid/framework/details/nccl_op_handle.h
index a43e80dfdba1ea..8c45917fff0c59 100644
--- a/paddle/fluid/framework/details/nccl_op_handle.h
+++ b/paddle/fluid/framework/details/nccl_op_handle.h
@@ -123,7 +123,7 @@ class NCCLOpHandleBase : public OpHandleBase {
         1,
         common::errors::InvalidArgument(
             "HierarchicalAllReduce can only run "
-            "one proccess with one card mode, but got %d cards.",
+            "one process with one card mode, but got %d cards.",
             places_.size()));
 
     for (auto& p : places_) {
diff --git a/paddle/fluid/pir/dialect/op_generator/op_build_gen.py b/paddle/fluid/pir/dialect/op_generator/op_build_gen.py
index fb0ceccf8ae733..c8e08e8baa347b 100644
--- a/paddle/fluid/pir/dialect/op_generator/op_build_gen.py
+++ b/paddle/fluid/pir/dialect/op_generator/op_build_gen.py
@@ -593,7 +593,7 @@ def GenBuildOutputs(
     meta_{name}.push_back(&vec_meta_{name}[i]);
   }}
 """
-    # Prepar input type
+    # Prepare input type
    for idx in range(len(op_input_name_list)):
        # is a vector
        if 'pir::VectorType' in op_input_type_list[idx]:
diff --git a/paddle/phi/kernels/gpu/unique_kernel.cu b/paddle/phi/kernels/gpu/unique_kernel.cu
index e08aa5bece3bc4..08a3aff718a768 100644
--- a/paddle/phi/kernels/gpu/unique_kernel.cu
+++ b/paddle/phi/kernels/gpu/unique_kernel.cu
@@ -121,7 +121,7 @@ UniqueFlattenedCUDATensor(const Context& context,
                           bool return_inverse,
                           bool return_counts,
                           int64_t num_input) {
-  // 0. Prepration
+  // 0. Preparation
   auto equal = thrust::equal_to();
   auto not_equal = thrust::not_equal_to();
   DenseTensor in_hat;
diff --git a/test/dygraph_to_static/transformer_dygraph_model.py b/test/dygraph_to_static/transformer_dygraph_model.py
index 3189e284d92c7f..211dd62daf5c61 100644
--- a/test/dygraph_to_static/transformer_dygraph_model.py
+++ b/test/dygraph_to_static/transformer_dygraph_model.py
@@ -203,31 +203,31 @@ def __init__(
     ):
         super().__init__()
 
-        self.preprocesser1 = PrePostProcessLayer(
+        self.preprocessor1 = PrePostProcessLayer(
             preprocess_cmd, d_model, prepostprocess_dropout
         )
         self.self_attn = MultiHeadAttention(
             d_key, d_value, d_model, n_head, attention_dropout
         )
-        self.postprocesser1 = PrePostProcessLayer(
+        self.postprocessor1 = PrePostProcessLayer(
             postprocess_cmd, d_model, prepostprocess_dropout
         )
-        self.preprocesser2 = PrePostProcessLayer(
+        self.preprocessor2 = PrePostProcessLayer(
             preprocess_cmd, d_model, prepostprocess_dropout
         )
         self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
-        self.postprocesser2 = PrePostProcessLayer(
+        self.postprocessor2 = PrePostProcessLayer(
             postprocess_cmd, d_model, prepostprocess_dropout
         )
 
     def forward(self, enc_input, attn_bias):
         attn_output = self.self_attn(
-            self.preprocesser1(enc_input), None, None, attn_bias
+            self.preprocessor1(enc_input), None, None, attn_bias
         )
-        attn_output = self.postprocesser1(attn_output, enc_input)
-        ffn_output = self.ffn(self.preprocesser2(attn_output))
-        ffn_output = self.postprocesser2(ffn_output, attn_output)
+        attn_output = self.postprocessor1(attn_output, enc_input)
+        ffn_output = self.ffn(self.preprocessor2(attn_output))
+        ffn_output = self.postprocessor2(ffn_output, attn_output)
         return ffn_output
 
 
@@ -267,7 +267,7 @@ def __init__(
                 ),
             )
         )
-        self.processer = PrePostProcessLayer(
+        self.processor = PrePostProcessLayer(
             preprocess_cmd, d_model, prepostprocess_dropout
         )
 
@@ -276,7 +276,7 @@ def forward(self, enc_input, attn_bias):
             enc_output = encoder_layer(enc_input, attn_bias)
             enc_input = enc_output
 
-        return self.processer(enc_output)
+        return self.processor(enc_output)
 
 
 class Embedder(Layer):
@@ -378,29 +378,29 @@ def __init__(
     ):
         super().__init__()
 
-        self.preprocesser1 = PrePostProcessLayer(
+        self.preprocessor1 = PrePostProcessLayer(
             preprocess_cmd, d_model, prepostprocess_dropout
         )
         self.self_attn = MultiHeadAttention(
             d_key, d_value, d_model, n_head, attention_dropout
         )
-        self.postprocesser1 = PrePostProcessLayer(
+        self.postprocessor1 = PrePostProcessLayer(
             postprocess_cmd, d_model, prepostprocess_dropout
         )
-        self.preprocesser2 = PrePostProcessLayer(
+        self.preprocessor2 = PrePostProcessLayer(
             preprocess_cmd, d_model, prepostprocess_dropout
         )
         self.cross_attn = MultiHeadAttention(
             d_key, d_value, d_model, n_head, attention_dropout
         )
-        self.postprocesser2 = PrePostProcessLayer(
+        self.postprocessor2 = PrePostProcessLayer(
             postprocess_cmd, d_model, prepostprocess_dropout
         )
-        self.preprocesser3 = PrePostProcessLayer(
+        self.preprocessor3 = PrePostProcessLayer(
             preprocess_cmd, d_model, prepostprocess_dropout
         )
         self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
-        self.postprocesser3 = PrePostProcessLayer(
+        self.postprocessor3 = PrePostProcessLayer(
             postprocess_cmd, d_model, prepostprocess_dropout
         )
 
@@ -408,20 +408,20 @@ def forward(
         self, dec_input, enc_output, self_attn_bias, cross_attn_bias, cache=None
     ):
         self_attn_output = self.self_attn(
-            self.preprocesser1(dec_input), None, None, self_attn_bias, cache
+            self.preprocessor1(dec_input), None, None, self_attn_bias, cache
         )
-        self_attn_output = self.postprocesser1(self_attn_output, dec_input)
+        self_attn_output = self.postprocessor1(self_attn_output, dec_input)
         cross_attn_output = self.cross_attn(
-            self.preprocesser2(self_attn_output),
+            self.preprocessor2(self_attn_output),
             enc_output,
             enc_output,
             cross_attn_bias,
         )
-        cross_attn_output = self.postprocesser2(
+        cross_attn_output = self.postprocessor2(
             cross_attn_output, self_attn_output
         )
-        ffn_output = self.ffn(self.preprocesser3(cross_attn_output))
-        ffn_output = self.postprocesser3(ffn_output, cross_attn_output)
+        ffn_output = self.ffn(self.preprocessor3(cross_attn_output))
+        ffn_output = self.postprocessor3(ffn_output, cross_attn_output)
         return ffn_output
 
 
@@ -461,7 +461,7 @@ def __init__(
                 ),
             )
         )
-        self.processer = PrePostProcessLayer(
+        self.processor = PrePostProcessLayer(
             preprocess_cmd, d_model, prepostprocess_dropout
         )
 
@@ -482,7 +482,7 @@ def forward(
                 None if caches is None else caches[i],
             )
             dec_input = dec_output
-        return self.processer(dec_output)
+        return self.processor(dec_output)
 
 
 class WrapDecoder(Layer):
diff --git a/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py b/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py
index 8aadb07a3083b1..fb4c38e3091def 100644
--- a/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py
+++ b/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py
@@ -69,7 +69,7 @@ def test_check_grad(self):
 
 
 class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
+    """Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""
 
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
@@ -110,7 +110,7 @@ def test_check_grad(self):
 
 
 class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
+    """Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""
 
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
@@ -146,7 +146,7 @@ def test_check_grad(self):
 
 
 class TestSigmoidCrossEntropyWithLogitsOp4(OpTest):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
+    """Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""
 
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
@@ -226,7 +226,7 @@ def test_check_grad(self):
 
 
 class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
-    """Test sigmoid_cross_entropy_with_logit_op with probabalistic label"""
+    """Test sigmoid_cross_entropy_with_logit_op with probabilistic label"""
 
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
diff --git a/tools/cinn/gen_c++_tutorial.py b/tools/cinn/gen_c++_tutorial.py
index 393160073940ca..f58d3d697e3463 100644
--- a/tools/cinn/gen_c++_tutorial.py
+++ b/tools/cinn/gen_c++_tutorial.py
@@ -47,7 +47,7 @@ def h3(self, title: str):
         self.add_line('### ' + title)
 
     def code_block(self, lang: str, block: list[str]):
-        # drop the precending and tailing empty lines to make code block more compact
+        # drop the preceding and trailing empty lines to make code block more compact
         pre_valid_offset = 0
         tail_valid_offset = 0
         for x in block:
diff --git a/tools/gen_ut_cmakelists.py b/tools/gen_ut_cmakelists.py
index a441931125a9ff..4995198132dfaf 100644
--- a/tools/gen_ut_cmakelists.py
+++ b/tools/gen_ut_cmakelists.py
@@ -80,7 +80,7 @@ def _process_conditions(conditions):
     return [c.strip() for c in conditions]
 
 
-def _proccess_archs(arch):
+def _process_archs(arch):
     """
     desc:
         Input archs options and warp it with 'WITH_', 'OR' and '()' in cmakelist grammar.
@@ -451,7 +451,7 @@ def _parse_line(self, line, curdir):
 
         envs = _process_envs(envs)
         conditions = _process_conditions(conditions)
-        archs = _proccess_archs(archs)
+        archs = _process_archs(archs)
         os_ = _process_os(os_)
         run_serial = _process_run_serial(run_serial)
 
diff --git a/tools/sampcd_processor.py b/tools/sampcd_processor.py
index 76757cb2434ab9..9f5caa71d05f51 100644
--- a/tools/sampcd_processor.py
+++ b/tools/sampcd_processor.py
@@ -543,12 +543,12 @@ def _execute_xdoctest(
         if self._use_multiprocessing:
             _ctx = multiprocessing.get_context('spawn')
             result_queue = _ctx.Queue()
-            exec_processer = functools.partial(_ctx.Process, daemon=True)
+            exec_processor = functools.partial(_ctx.Process, daemon=True)
         else:
             result_queue = queue.Queue()
-            exec_processer = functools.partial(threading.Thread, daemon=True)
+            exec_processor = functools.partial(threading.Thread, daemon=True)
 
-        processer = exec_processer(
+        processor = exec_processor(
             target=self._execute_with_queue,
             args=(
                 result_queue,
@@ -557,11 +557,11 @@ def _execute_xdoctest(
             ),
         )
 
-        processer.start()
+        processor.start()
         result = result_queue.get(
             timeout=directives.get('timeout', TEST_TIMEOUT)
         )
-        processer.join()
+        processor.join()
 
         return result
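
For context on the _process_archs rename above: its docstring says the helper wraps
arch options with 'WITH_' and 'OR' in CMake condition grammar. The sketch below is
illustrative only -- the helper name, the ';'-separated input format, and the exact
output shape are assumptions, not code from this patch or from tools/gen_ut_cmakelists.py:

    # Hypothetical sketch of the behavior the docstring describes;
    # NOT the actual implementation in tools/gen_ut_cmakelists.py.
    def sketch_process_archs(arch: str) -> str:
        # e.g. 'GPU;ROCM' -> '(WITH_GPU OR WITH_ROCM)'
        archs = [a.strip() for a in arch.split(';') if a.strip()]
        if not archs:
            return ''
        return '(' + ' OR '.join('WITH_' + a for a in archs) + ')'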