[CodeStyle][Typos][U-9] Fix typo (Unsupport) (part2) #71023

Merged 1 commit on Feb 6, 2025
20 changes: 10 additions & 10 deletions paddle/cinn/ir/group_schedule/config/group_tile_util.cc
@@ -131,7 +131,7 @@ bool ScheduleBlockRealizesShouldVectorizeCheck(
 void CollectScheduleBlockRealizeLoadTensorsAndIndex(
     ir::Expr block,
     std::unordered_map<std::string, std::vector<std::vector<Expr>>>&
-        load_tensor_and_indexs) {
+        load_tensor_and_indexes) {
   ir::ir_utils::CollectIRNodesWithoutTensor(
       block,
       [&](const ir::Expr* expr) {
@@ -146,7 +146,7 @@ void CollectScheduleBlockRealizeLoadTensorsAndIndex(
             tensor,
             ::common::errors::InvalidArgument(
                 "Expected _Tensor_ node in load, but received nullptr."));
-        load_tensor_and_indexs[tensor->name].push_back(node->indices);
+        load_tensor_and_indexes[tensor->name].push_back(node->indices);
         return true;
       }
       return false;
@@ -158,7 +158,7 @@ void CollectScheduleBlockRealizeLoadTensorsAndIndex(
 void CollectScheduleBlockRealizeStoreTensorsAndIndex(
     ir::Expr block,
     std::unordered_map<std::string, std::vector<std::vector<Expr>>>&
-        store_tensor_and_indexs) {
+        store_tensor_and_indexes) {
   ir::ir_utils::CollectIRNodesWithoutTensor(
       block,
       [&](const ir::Expr* expr) {
@@ -173,7 +173,7 @@ void CollectScheduleBlockRealizeStoreTensorsAndIndex(
             tensor,
             ::common::errors::InvalidArgument(
                 "Expected _Tensor_ node in load, but received nullptr."));
-        store_tensor_and_indexs[tensor->name].push_back(node->indices);
+        store_tensor_and_indexes[tensor->name].push_back(node->indices);
         return true;
       }
       return false;
@@ -294,10 +294,10 @@ bool ScheduleBlockRealizeCanVectorize(
   std::unordered_map<ir::Var, ir::Expr> iter_var2value =
       ir::analyzer::GetIterVarToValueOfSBlock(expr_schedule_block_realize);
   std::unordered_map<std::string, std::vector<std::vector<Expr>>>
-      load_tensor_and_indexs;
+      load_tensor_and_indexes;
   CollectScheduleBlockRealizeLoadTensorsAndIndex(expr_schedule_block_realize,
-                                                 load_tensor_and_indexs);
-  for (const auto& tensor : load_tensor_and_indexs) {
+                                                 load_tensor_and_indexes);
+  for (const auto& tensor : load_tensor_and_indexes) {
     if (TensorCanBeVectorized(tensor.first,
                               tensor.second,
                               for_iters,
@@ -309,11 +309,11 @@ bool ScheduleBlockRealizeCanVectorize(
   }

   std::unordered_map<std::string, std::vector<std::vector<Expr>>>
-      store_tensor_and_indexs;
+      store_tensor_and_indexes;
  CollectScheduleBlockRealizeStoreTensorsAndIndex(expr_schedule_block_realize,
-                                                 store_tensor_and_indexs);
+                                                 store_tensor_and_indexes);

-  for (const auto& tensor : store_tensor_and_indexs) {
+  for (const auto& tensor : store_tensor_and_indexes) {
     if (TensorCanBeVectorized(tensor.first,
                               tensor.second,
                               for_iters,
6 changes: 3 additions & 3 deletions paddle/cinn/optim/vectorize_for_trans.cc
@@ -56,8 +56,8 @@ std::unordered_map<std::string, ir::Var> CollectExprSymbols(Expr *x) {
   return std::move(mutator.GetSymbols());
 }

-Expr CalculateTensorOffsetWithIndexs(Expr *tensor,
-                                     const std::vector<ir::Expr> &indices) {
+Expr CalculateTensorOffsetWithIndexes(Expr *tensor,
+                                      const std::vector<ir::Expr> &indices) {
   auto *tensor_ptr = tensor->As<ir::_Tensor_>();
   PADDLE_ENFORCE_NOT_NULL(
       tensor_ptr,
@@ -341,7 +341,7 @@ class ScheduleBlockTensorVectorizeTeller : public ir::IRMutator<Expr *> {
   }

   // eg 2 : Address access of tensor contains vectorize axis.
-  Expr offset = CalculateTensorOffsetWithIndexs(&node->tensor, indices);
+  Expr offset = CalculateTensorOffsetWithIndexes(&node->tensor, indices);
   // situation 2. don't deal with select situation
   if (IsSelectOpWithSpecialOffset(offset)) {
     vectorize_tensors_.clear();
17 changes: 9 additions & 8 deletions paddle/fluid/imperative/prepared_operator.cc
@@ -219,9 +219,10 @@ PreparedOp PrepareImpl(
 #endif

 #if defined(PADDLE_WITH_XPU)
-  bool is_xpu_unsupport = expected_kernel_key.backend() == phi::Backend::XPU &&
-                          !paddle::platform::is_xpu_support_op(
-                              op.Type(), expected_kernel_key.dtype());
+  bool is_xpu_unsupported =
+      expected_kernel_key.backend() == phi::Backend::XPU &&
+      !paddle::platform::is_xpu_support_op(op.Type(),
+                                           expected_kernel_key.dtype());
 #endif

   bool has_phi_kernel = false;
@@ -292,7 +293,7 @@ PreparedOp PrepareImpl(

   if (phi_kernel.IsValid()
 #if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
-      && !is_xpu_unsupport
+      && !is_xpu_unsupported
 #endif
   ) {
     VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << phi_kernel_name
@@ -428,10 +429,10 @@ PreparedOp PrepareImpl(
       kernels_iter->second.find(fluid_kernel_type) ==
           kernels_iter->second.end())
 #if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
-      || is_xpu_unsupport
+      || is_xpu_unsupported
 #endif
 #if defined(PADDLE_WITH_XPU_KP)
-      || (is_xpu_unsupport && !is_xpu_kp_support)
+      || (is_xpu_unsupported && !is_xpu_kp_support)
 #endif
   ) {
     if (has_phi_kernel) {
@@ -467,7 +468,7 @@ PreparedOp PrepareImpl(

 #if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
   if (phi::is_xpu_place(fluid_kernel_type.place_) &&
-      (kernel_iter == kernels.end() || is_xpu_unsupport)) {
+      (kernel_iter == kernels.end() || is_xpu_unsupported)) {
     VLOG(3) << "fluid missing XPU kernel: " << op.Type()
             << ", expected_kernel_key:" << fluid_kernel_type
             << ", fallbacking to CPU one!";
@@ -491,7 +492,7 @@ PreparedOp PrepareImpl(
             << ", using_kernel_key:" << fluid_kernel_type;
   }
   if (!is_xpu_kp_support &&
-      (kernel_iter == kernels.end() || is_xpu_unsupport)) {
+      (kernel_iter == kernels.end() || is_xpu_unsupported)) {
     VLOG(3) << "fluid missing XPU kernel: " << op.Type()
             << ", expected_kernel_key:" << fluid_kernel_type
             << ", fallbacking to CPU one!";
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/reducer.cc
@@ -49,7 +49,7 @@ void Group::DivNRanks(const phi::DeviceContext &context, int64_t nranks) {
 #ifdef PADDLE_WITH_HIP
   if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
     PADDLE_THROW(
-        common::errors::Fatal("Unsupport BF16 in DataParallel for now"));
+        common::errors::Fatal("Unsupported BF16 in DataParallel for now"));
   }
   framework::VisitDataTypeForHIP(
       dtype_,
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/reducer.cu
@@ -24,7 +24,7 @@ void Group::DivNRanks(phi::DenseTensor *tensor,
 #ifdef PADDLE_WITH_HIP
   if (dtype_ == paddle::framework::proto::VarType_Type_BF16) {
     PADDLE_THROW(
-        common::errors::Fatal("Unsupport BF16 in DataParallel for now"));
+        common::errors::Fatal("Unsupported BF16 in DataParallel for now"));
   }
   framework::VisitDataTypeForHIP(
       dtype_, DivNRanksForAllReduce<phi::GPUContext>(tensor, nranks, context));
6 changes: 3 additions & 3 deletions paddle/fluid/inference/capi/c_api.cc
@@ -87,7 +87,7 @@ paddle::PaddleDType ConvertToPaddleDType(PD_DataType dtype) {
       PADDLE_ENFORCE_EQ(false,
                         true,
                         common::errors::Unimplemented(
-                            "Unsupport dtype in ConvertToPaddleDType."));
+                            "Unsupported dtype in ConvertToPaddleDType."));
       return PD_PaddleDType::FLOAT32;
   }
 }
@@ -106,7 +106,7 @@ PD_DataType ConvertToPDDataType(PD_PaddleDType dtype) {
       PADDLE_ENFORCE_EQ(false,
                         true,
                         common::errors::Unimplemented(
-                            "Unsupport dtype in ConvertToPDDataType."));
+                            "Unsupported dtype in ConvertToPDDataType."));
       return PD_DataType::PD_UNKDTYPE;
   }
 }
@@ -123,7 +123,7 @@ PD_ACPrecision ConvertToACPrecision(Precision dtype) {
       PADDLE_ENFORCE_EQ(false,
                         true,
                         common::errors::Unimplemented(
-                            "Unsupport precision in ConvertToACPrecision."));
+                            "Unsupported precision in ConvertToACPrecision."));
       return PD_ACPrecision::kFloat32;
   }
 }
2 changes: 1 addition & 1 deletion paddle/fluid/inference/capi_exp/pd_config.cc
@@ -44,7 +44,7 @@ static Config::Precision ConvertToCxxPrecisionType(PD_PrecisionType precision) {
       return Config::Precision::kHalf;
     default:
       PADDLE_THROW(common::errors::InvalidArgument(
-          "Unsupport paddle precision type %d.", precision));
+          "Unsupported paddle precision type %d.", precision));
       return Config::Precision::kFloat32;
   }
 }
4 changes: 2 additions & 2 deletions paddle/fluid/inference/capi_exp/pd_utils.cc
@@ -226,7 +226,7 @@ PlaceType CvtToCxxPlaceType(PD_PlaceType place_type) {
       return PlaceType::kCUSTOM;
     default:
       PADDLE_THROW(common::errors::InvalidArgument(
-          "Unsupport paddle place type %d.", place_type));
+          "Unsupported paddle place type %d.", place_type));
       return PlaceType::kUNK;
   }
 }
@@ -260,7 +260,7 @@ DataType CvtToCxxDatatype(PD_DataType data_type) {
       return DataType::INT8;
     default:
       PADDLE_THROW(common::errors::InvalidArgument(
-          "Unsupport paddle data type %d.", data_type));
+          "Unsupported paddle data type %d.", data_type));
       return DataType::FLOAT32;
   }
 }
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/plugin/spmm_plugin.cu
@@ -1012,7 +1012,7 @@ nvinfer1::IPluginV2* SpmmPluginDynamicCreator::createPlugin(
     } else if (field_name.compare("activation_id") == 0) {
       activation_id = static_cast<const int*>(fc->fields[i].data)[0];
     } else {
-      PADDLE_THROW(common::errors::Fatal("Unsupport plugin field"));
+      PADDLE_THROW(common::errors::Fatal("Unsupported plugin field"));
     }
   }
@@ -41,7 +41,7 @@ class DistributedLookupTableKernel : public framework::OpKernel<T> {
     } else {
       PADDLE_THROW(common::errors::InvalidArgument(
           "Expected type of `W` must be Tensor, SelectedRows.But got "
-          "unsupport type: %s.",
+          "unsupported type: %s.",
           framework::ToTypeName(var->Type())));
     }
2 changes: 1 addition & 1 deletion paddle/fluid/operators/sum_op.cc
@@ -117,7 +117,7 @@ class SumOp : public framework::OperatorWithKernel {
     PADDLE_THROW(common::errors::InvalidArgument(
         "Expected type of Input(X) must be Tensor, SelectedRows or "
         "DenseTensorArray. But got "
-        "unsupport type: %s.",
+        "unsupported type: %s.",
         framework::ToTypeName(x_vars[0]->Type())));
   }
 };
2 changes: 1 addition & 1 deletion paddle/phi/core/distributed/gloo_utils.h
@@ -151,7 +151,7 @@ void SetReduceFunc(P* opts, int reduce_type) {
       break;
     default:
       PADDLE_THROW(
-          errors::InvalidArgument("Unsupport reduce type: %d.", reduce_type));
+          errors::InvalidArgument("Unsupported reduce type: %d.", reduce_type));
   }
 }
8 changes: 4 additions & 4 deletions paddle/phi/core/kernel_factory.cc
@@ -212,14 +212,14 @@ bool KernelFactory::HasKernel(const std::string& kernel_name,
     }
   }
   // check in xpu
-  bool xpu_unsupport = !phi::backends::xpu::is_xpu_support_op(
+  bool xpu_unsupported = !phi::backends::xpu::is_xpu_support_op(
       fluid_op_name, kernel_key.dtype());
   VLOG(6) << "Current KernelKey is " << kernel_key;
   // Fall back to CPU, when FLAGS_enable_api_kernel_fallback is true and op
   // was unregistered in xpu and kp
   if (FLAGS_enable_api_kernel_fallback &&
       (kernel_iter == iter->second.end() ||
-       (xpu_unsupport && !has_kp_kernel))) {
+       (xpu_unsupported && !has_kp_kernel))) {
     return false;
   }
 #elif defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
@@ -341,13 +341,13 @@ KernelResult KernelFactory::SelectKernelOrThrowError(
     }
   }
   // check in xpu
-  bool xpu_unsupport =
+  bool xpu_unsupported =
       !phi::backends::xpu::is_xpu_support_op(fluid_op_name, kernel_key.dtype());
   VLOG(6) << "Current KernelKey is " << kernel_key;
   // Fall back to CPU, when FLAGS_enable_api_kernel_fallback is true and op
   // was unregistered in xpu and kp
   if (FLAGS_enable_api_kernel_fallback &&
-      (kernel_iter == iter->second.end() || (xpu_unsupport && !has_kp_kernel))
+      (kernel_iter == iter->second.end() || (xpu_unsupported && !has_kp_kernel))
 #elif defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
   VLOG(6) << "fluid_op_name: " << TransToFluidOpName(kernel_name);
   bool is_xpu_support1 = phi::backends::xpu::is_xpu_support_op(
Expand Down