[CodeStyle][Typos][O-[1-2],O-[4-5],O[10-14]] Fix typos (occured,Ocurred,occures,offets,offseted,oeprations,Opeartion,operaion,oppotunity,optimzed,optmize) #70731

Merged · 1 commit · Jan 10, 2025
11 changes: 0 additions & 11 deletions _typos.toml
@@ -65,19 +65,8 @@ insid = 'insid'
insepection = 'insepection'
intall = 'intall'
instanciate = 'instanciate'
- occured = 'occured'
- Ocurred = 'Ocurred'
- occures = 'occures'
- offets = 'offets'
- offseted = 'offseted'
Operants = 'Operants'
operants = 'operants'
- oeprations = 'oeprations'
- Opeartion = 'Opeartion'
- operaion = 'operaion'
- oppotunity = 'oppotunity'
- optimzed = 'optimzed'
- optmize = 'optmize'
optin = 'optin'
Optin = 'Optin'
rder = 'rder'
8 changes: 4 additions & 4 deletions paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc
@@ -587,7 +587,7 @@ std::vector<pir::Value> GetMinimalInputs(
const std::vector<pir::Value>& input_tensors) {
std::unordered_set<symbol::DimExpr> handled_dim_exprs;
std::unordered_set<pir::Value> first_occurred_input_tensors;
- auto TryCollectFirstOcurredInput_tensor =
+ auto TryCollectFirstOccurredInput_tensor =
[&](pir::Value input_tensor,
const std::vector<symbol::DimExpr>& dim_exprs) {
for (const auto& dim_expr : dim_exprs) {
@@ -601,11 +601,11 @@ std::vector<pir::Value> GetMinimalInputs(
const auto& shape_or_data_dim_exprs =
ShapeOrDataDimExprs4Value(input_tensor);
if (shape_or_data_dim_exprs.data().has_value()) {
- TryCollectFirstOcurredInput_tensor(
+ TryCollectFirstOccurredInput_tensor(
input_tensor, shape_or_data_dim_exprs.data().value());
}
- TryCollectFirstOcurredInput_tensor(input_tensor,
-                                    shape_or_data_dim_exprs.shape());
+ TryCollectFirstOccurredInput_tensor(input_tensor,
+                                     shape_or_data_dim_exprs.shape());
}
std::vector<pir::Value> ret{};
ret.reserve(input_tensors.size());
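For context, `GetMinimalInputs` keeps only the first input tensor on which each symbolic dimension expression occurs, which is why the renamed lambda is invoked at every call site above. Below is a minimal standalone sketch of that first-occurrence filtering, using plain strings as hypothetical stand-ins for `symbol::DimExpr` and `pir::Value` rather than Paddle's actual types:

```cpp
#include <iostream>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

// Keep a "tensor" only if it is the first to contribute a not-yet-seen
// "dim expr"; mirrors the dedup logic around TryCollectFirstOccurredInput_tensor.
std::vector<std::string> CollectFirstOccurred(
    const std::vector<std::pair<std::string, std::vector<std::string>>>& inputs) {
  std::unordered_set<std::string> handled_dim_exprs;
  std::vector<std::string> kept;
  for (const auto& [tensor, dim_exprs] : inputs) {
    bool contributes_new = false;
    for (const auto& e : dim_exprs) {
      // insert(...).second is true only when the dim expr was not seen before.
      if (handled_dim_exprs.insert(e).second) contributes_new = true;
    }
    if (contributes_new) kept.push_back(tensor);
  }
  return kept;
}

int main() {
  auto kept = CollectFirstOccurred({{"x", {"S0", "S1"}},
                                    {"y", {"S1"}},   // nothing new, dropped
                                    {"z", {"S2"}}});
  for (const auto& t : kept) std::cout << t << "\n";  // prints x and z
}
```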
4 changes: 2 additions & 2 deletions paddle/cinn/ir/utils/ir_copy.cc
@@ -273,7 +273,7 @@ struct IRCopyVisitor : public ir::IRVisitorRequireReImpl<Expr> {
auto domain = Visit(op->domain);
auto buffer_expr = Expr(op->buffer);
// TODO(Superjomn) copy the operation.
- auto operaion = op->operation;
+ auto operation = op->operation;
auto name = op->name;
auto tensor = make_shared<_Tensor_>();

@@ -289,7 +289,7 @@ struct IRCopyVisitor : public ir::IRVisitorRequireReImpl<Expr> {
tensor->domain = domain;
tensor->shape = shape;
tensor->reduce_axis = op->reduce_axis;
- tensor->operation = operaion;
+ tensor->operation = operation;
tensor->name = name;
tensor->set_type(op->type());
tensor->axis_ = op->axis_;
2 changes: 1 addition & 1 deletion paddle/fluid/framework/fleet/heter_ps/heter_comm_inl.h
@@ -3141,7 +3141,7 @@ void HeterComm<KeyType, ValType, GradType, GPUAccessor>::

// local use end scatter(len is node num), reote use before scatter(len is
// pull size) recompute offsets and parts
VLOG(2) << "begin recalc local and remote size and offets";
VLOG(2) << "begin recalc local and remote size and offsets";
for (int i = 0; i < node_size_; i++) {
size_t local_size = 0;
size_t remote_size = 0;
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -818,7 +818,7 @@ class OpConverter {
// layer->getOutput(i)->getDimensions().nbDims,
// 0,
// common::errors::InvalidArgument(
// "Error occures in Paddle-TRT layer with output name: %s",
// "Error occurred in Paddle-TRT layer with output name: %s",
// output_tensor_names[i].c_str()));
}
layer->setName((layer_name + ")").c_str());
2 changes: 1 addition & 1 deletion paddle/fluid/pir/utils/general_functions.cc
@@ -139,7 +139,7 @@ pir::Type TranslateToIrDataType(phi::DataType dtype) {
return data_type;
}

- pir::Operation* CreateOpeartionByName(const std::string& op_name,
+ pir::Operation* CreateOperationByName(const std::string& op_name,
const std::vector<pir::Value>& inputs,
const pir::AttributeMap& attrs,
const pir::PatternRewriter& rewriter) {
2 changes: 1 addition & 1 deletion paddle/fluid/pir/utils/general_functions.h
@@ -96,7 +96,7 @@ pir::Type TranslateToIrDataType(phi::DataType dtype);
*
* @return pir::Operation*
*/
- pir::Operation* CreateOpeartionByName(const std::string& op_name,
+ pir::Operation* CreateOperationByName(const std::string& op_name,
const std::vector<pir::Value>& inputs,
const pir::AttributeMap& attrs,
const pir::PatternRewriter& rewriter);
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/fusion.cc
@@ -4398,7 +4398,7 @@ void CrossAttentionXPUInferMeta(
phi::make_ddim({input_q_dims[0], input_q_dims[1], head_num * head_dim}));
qkv->set_dtype(out_dtype);
qkv->set_layout(input_q.layout());
- // TODO(Terry) optmize the max value num
+ // TODO(Terry) optimize the max value num
// unable to pass few PR-CIs, so just use a constant value
// int xpu2_max_value_num = phi::backends::xpu::get_xpu_max_ptr_size(-1);
const int xpu2_max_value_num = 6;
@@ -114,7 +114,7 @@ class DqMmaBase {
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;

- /// Number of warp-level GEMM oeprations
+ /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);

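For context, the corrected comment annotates `kWarpGemmIterations`, which is simply the warp-tile K extent divided by the K extent of one MMA instruction. A toy compile-time check with made-up shapes (the real values come from `WarpGemm` and `Operator::Policy::MmaShape`, not from these constants):

```cpp
#include <iostream>

// Hypothetical shapes, chosen only to illustrate the division.
constexpr int kWarpGemmK = 32;  // K extent of one warp-level GEMM tile
constexpr int kMmaShapeK = 8;   // K extent of one tensor-core MMA instruction

// Number of warp-level GEMM operations needed to cover the warp tile in K.
constexpr int kWarpGemmIterations = kWarpGemmK / kMmaShapeK;
static_assert(kWarpGemmIterations == 4, "32 / 8 = 4 MMA steps per warp tile");

int main() { std::cout << kWarpGemmIterations << "\n"; }
```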
@@ -106,7 +106,7 @@ class CustomMmaBase {
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;

- /// Number of warp-level GEMM oeprations
+ /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);

@@ -179,7 +179,7 @@ class MmaBaseFromSharedMemory {
Shape::kK / WarpGemm::kK>;
using WarpCount1 = WarpCount;

- /// Number of warp-level GEMM oeprations
+ /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
static int const kWarpGemmIterations1 = kWarpGemmIterations;
2 changes: 1 addition & 1 deletion paddle/pir/src/core/op_operand_impl.cc
@@ -61,7 +61,7 @@ void OpOperandImpl::RemoveFromUdChain() {
if (!source_) return;
if (!prev_use_addr_) return;
if (prev_use_addr_ == source_.impl()->first_use_addr()) {
- /// NOTE: In ValueImpl, first_use_offseted_by_index_ use lower three bits
+ /// NOTE: In ValueImpl, first_use_offsetted_by_index_ use lower three bits
/// storage index information, so need to be updated using the set_first_use
/// method here.
source_.impl()->set_first_use(next_use_);
2 changes: 1 addition & 1 deletion paddle/pir/src/core/program.cc
@@ -47,7 +47,7 @@ int64_t GetUniqueRandomId() {
return random_id;
}
}
LOG(FATAL) << "Fatal bug occured in GetUniqueRandomId().";
LOG(FATAL) << "Fatal bug occurred in GetUniqueRandomId().";
}

} // namespace
4 changes: 2 additions & 2 deletions paddle/pir/src/core/value_impl.cc
@@ -28,7 +28,7 @@ namespace pir::detail {
void ValueImpl::set_first_use(OpOperandImpl *first_use) {
uint32_t offset = kind();
uintptr_t ptr = reinterpret_cast<uintptr_t>(first_use) + offset;
- first_use_offseted_by_kind_ = reinterpret_cast<OpOperandImpl *>(ptr);
+ first_use_offsetted_by_kind_ = reinterpret_cast<OpOperandImpl *>(ptr);
VLOG(10) << "The index of this value is: " << offset
<< ". The address of this value is: " << this
<< ". This value first use is: " << first_use << ".";
@@ -58,7 +58,7 @@ ValueImpl::ValueImpl(Type type, uint32_t kind) : id_(GenerateId()) {
kind));
type_ = type;
uintptr_t ptr = reinterpret_cast<uintptr_t>(nullptr) + kind;
- first_use_offseted_by_kind_ = reinterpret_cast<OpOperandImpl *>(ptr);
+ first_use_offsetted_by_kind_ = reinterpret_cast<OpOperandImpl *>(ptr);
VLOG(10) << "Construct a ValueImpl whose's kind is " << kind
<< ". The value_impl address is: " << this;
}
10 changes: 5 additions & 5 deletions paddle/pir/src/core/value_impl.h
@@ -44,12 +44,12 @@ class alignas(8) ValueImpl {

OpOperandImpl *first_use() const {
return reinterpret_cast<OpOperandImpl *>(
-     reinterpret_cast<uintptr_t>(first_use_offseted_by_kind_) & (~0x07));
+     reinterpret_cast<uintptr_t>(first_use_offsetted_by_kind_) & (~0x07));
}

void set_first_use(OpOperandImpl *first_use);

- OpOperandImpl **first_use_addr() { return &first_use_offseted_by_kind_; }
+ OpOperandImpl **first_use_addr() { return &first_use_offsetted_by_kind_; }

bool use_empty() const { return first_use() == nullptr; }

@@ -60,10 +60,10 @@ class alignas(8) ValueImpl {
std::string PrintUdChain();

///
- /// \brief Interface functions of "first_use_offseted_by_kind_" attribute.
+ /// \brief Interface functions of "first_use_offsetted_by_kind_" attribute.
///
uint32_t kind() const {
-   return reinterpret_cast<uintptr_t>(first_use_offseted_by_kind_) & 0x07;
+   return reinterpret_cast<uintptr_t>(first_use_offsetted_by_kind_) & 0x07;
}

template <typename T>
@@ -93,7 +93,7 @@ class alignas(8) ValueImpl {
/// output(OpInlineResultImpl); (2) index = 6: represent the position >=6
/// outline output(OpOutlineResultImpl); (3) index = 7 is reserved.
///
- OpOperandImpl *first_use_offseted_by_kind_ = nullptr;
+ OpOperandImpl *first_use_offsetted_by_kind_ = nullptr;

const uint64_t id_ = 0;
};
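For context, the renamed member packs a 3-bit kind value into the low bits of an 8-byte-aligned `OpOperandImpl*`, which is why the surrounding code masks with `& (~0x07)` and `& 0x07`. A self-contained sketch of that pointer-tagging trick, with a plain struct as a hypothetical stand-in for `OpOperandImpl` (names here are illustrative, not Paddle's):

```cpp
#include <cassert>
#include <cstdint>

struct alignas(8) Operand {};  // 8-byte alignment frees the low 3 bits

// Pack a 3-bit kind into the low bits of an aligned pointer.
Operand* tag(Operand* p, uint32_t kind) {
  assert(kind < 8);
  return reinterpret_cast<Operand*>(reinterpret_cast<uintptr_t>(p) + kind);
}

// Recover the untagged pointer and the kind, mirroring first_use() / kind().
Operand* untag(Operand* tagged) {
  return reinterpret_cast<Operand*>(reinterpret_cast<uintptr_t>(tagged) &
                                    ~uintptr_t{0x07});
}
uint32_t kind_of(Operand* tagged) {
  return reinterpret_cast<uintptr_t>(tagged) & 0x07;
}

int main() {
  Operand op;
  Operand* tagged = tag(&op, 5);
  assert(untag(tagged) == &op);
  assert(kind_of(tagged) == 5);
}
```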
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/static/pir_pass.py
@@ -231,7 +231,7 @@ class ReshardPasses:

@staticmethod
def decompose_reshard_pass(dist_program):
- # split composed reshard op into atomic reshard ops, which would increase the oppotunity of reshard Re-Use in following fold_reshard_pass.
+ # split composed reshard op into atomic reshard ops, which would increase the opportunity of reshard Re-Use in following fold_reshard_pass.
del_ops = []
for op in dist_program.global_block().ops:
if op.name() != 'dist_op.reshard':
4 changes: 2 additions & 2 deletions python/paddle/optimizer/optimizer.py
@@ -2008,7 +2008,7 @@ def _add_param_group(self, param_group):
Add a param group to parameter_list.

Args:
- param_group (dict): The group of Tensors to be optimzed with
+ param_group (dict): The group of Tensors to be optimized with
different optimization options.
"""
params = param_group['params']
@@ -2052,7 +2052,7 @@ def _update_param_group(self, parameters):
"""
Update the param group with new entry
Args:
- parameters (dict): The extra group of Tensors to be optimzed with
+ parameters (dict): The extra group of Tensors to be optimized with
different optimization options. Only used in child class.
"""
pass
4 changes: 2 additions & 2 deletions test/cpp/pir/core/paddle_fatal_test.cc
@@ -20,7 +20,7 @@
class FatalClass {
public:
FatalClass() {}
- ~FatalClass() { PADDLE_FATAL("fatal occured in deconstructor!"); }
+ ~FatalClass() { PADDLE_FATAL("fatal occurred in deconstructor!"); }
};

void throw_exception_in_func() {
@@ -32,7 +32,7 @@ void terminate_in_func() { FatalClass test_case; }

TEST(paddle_fatal_test, base) {
EXPECT_FALSE(::common::enforce::IsPaddleFatalSkip());
- EXPECT_DEATH(terminate_in_func(), "fatal occured in deconstructor!.*");
+ EXPECT_DEATH(terminate_in_func(), "fatal occurred in deconstructor!.*");
EXPECT_THROW(throw_exception_in_func(), common::enforce::EnforceNotMet);
EXPECT_FALSE(::common::enforce::IsPaddleFatalSkip());
::common::enforce::SkipPaddleFatal(true);