Skip to content

Commit

Permalink
Fix usless useless, etc (#62323)
Browse files Browse the repository at this point in the history
  • Loading branch information
co63oc authored Mar 4, 2024
1 parent 3ca79b6 commit b8b08b7
Show file tree
Hide file tree
Showing 12 changed files with 52 additions and 49 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/inference/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ set(SHARED_INFERENCE_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/api/details/zero_copy_tensor.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/io_utils.cc)

# NOTE(Aurelius84): For inference library, some DEPS is usless
# NOTE(Aurelius84): For inference library, some DEPS is useless
# such as non-infer operator related targets et.al.
list(REMOVE_ITEM fluid_modules cinn_op_dialect)
# NOTE(Aurelisu84): Remove pir dialect related target DEPS for inference
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/set_value_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ limitations under the License. */
PADDLE_ENFORCE_EQ(vec_##attr_name__.size(), \
1UL, \
platform::errors::InvalidArgument( \
"attr axes/starst/ends/steps 's size in " \
"attr axes/starts/ends/steps 's size in " \
"set_value must be one, but got %d", \
vec_##attr_name__.size())); \
} \
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ inline const nvinfer1::IDimensionExpr* CalcOutputSize(
return output_size;
}

nvinfer1::DimsExprs UnflodInferMeta(
nvinfer1::DimsExprs UnfoldInferMeta(
int output_index,
const nvinfer1::DimsExprs* inputs,
int nb_inputs,
Expand Down Expand Up @@ -879,7 +879,7 @@ nvinfer1::DimsExprs SolveInferMeta(
PD_REGISTER_DYNAMIC_INFER_META_FN(gather_nd, GatherNdInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(yolo_box, YoloBoxInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(instance_norm, InstanceNormInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(unfold, UnflodInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(unfold, UnfoldInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(scatter_nd_add, ScatterNdAddInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(inverse, UnchangedInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(moe, MoeInferMeta);
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/engine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ void TensorRTEngine::Weight::SetDataType(phi::DataType type) {
#endif
default:
paddle::platform::errors::InvalidArgument(
"Paddle-TRT loads weighths failed, found not supported data type %s.",
"Paddle-TRT loads weights failed, found not supported data type %s.",
type);
break;
}
Expand Down
51 changes: 27 additions & 24 deletions paddle/fluid/inference/tensorrt/op_teller.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1460,7 +1460,7 @@ struct SimpleOpTypeSetTeller : public Teller {
}
if (desc.Output("Out").size() != 1) {
VLOG(3) << "The input op's Output(\"Out\").size() "
"should equal to 1, but reveceid Output(\"Out\").size() = "
"should equal to 1, but received Output(\"Out\").size() = "
<< desc.Output("Out").size() << ".";
return false;
}
Expand Down Expand Up @@ -2080,29 +2080,31 @@ struct SimpleOpTypeSetTeller : public Teller {
auto inputs = desc.Inputs();
bool has_bias_qk = (inputs.find("BiasQK") == inputs.end()) ? false : true;
if (has_bias_qk) {
auto* biasqk_desc =
auto* bias_qk_desc =
block->FindVarRecursive(desc.Input("BiasQK").front());
const auto biasqk_shape = biasqk_desc->GetShape();
const auto bias_qk_shape = bias_qk_desc->GetShape();
// The BiasQK's shape requires to be
// [batch, 1, 1, length] or [batch, head, length, length].
bool has_same_shape = head_number == biasqk_shape[1] &&
input_shape[1] == biasqk_shape[2] &&
input_shape[1] == biasqk_shape[3];
bool is_broadcastable = biasqk_shape[1] == 1 && biasqk_shape[2] == 1 &&
input_shape[1] == biasqk_shape[3];
is_broadcastable =
is_broadcastable || (biasqk_shape[0] == 1 && biasqk_shape[1] == 1 &&
input_shape[1] == biasqk_shape[2] &&
input_shape[1] == biasqk_shape[3]);
bool has_same_shape = head_number == bias_qk_shape[1] &&
input_shape[1] == bias_qk_shape[2] &&
input_shape[1] == bias_qk_shape[3];
bool is_broadcastable = bias_qk_shape[1] == 1 &&
bias_qk_shape[2] == 1 &&
input_shape[1] == bias_qk_shape[3];
is_broadcastable = is_broadcastable ||
(bias_qk_shape[0] == 1 && bias_qk_shape[1] == 1 &&
input_shape[1] == bias_qk_shape[2] &&
input_shape[1] == bias_qk_shape[3]);
if (!(has_same_shape || is_broadcastable)) {
VLOG(3) << "The BiasQK's shape is invalid, expect [" << input_shape[0]
<< ", 1, 1, " << input_shape[1] << "] "
<< "or [" << input_shape[0] << ", " << head_number << ", "
<< input_shape[1] << ", " << input_shape[1] << "] "
<< "or [" << input_shape[0] << "/1, " << 1 << ", "
<< input_shape[1] << ", " << input_shape[1] << "] "
<< "but got [" << biasqk_shape[0] << ", " << biasqk_shape[1]
<< ", " << biasqk_shape[2] << ", " << biasqk_shape[3] << "].";
<< "but got [" << bias_qk_shape[0] << ", " << bias_qk_shape[1]
<< ", " << bias_qk_shape[2] << ", " << bias_qk_shape[3]
<< "].";
return false;
}
} else {
Expand Down Expand Up @@ -2140,23 +2142,24 @@ struct SimpleOpTypeSetTeller : public Teller {
auto inputs = desc.Inputs();
bool has_bias_qk = (inputs.find("BiasQK") == inputs.end()) ? false : true;
if (has_bias_qk) {
auto* biasqk_desc =
auto* bias_qk_desc =
block->FindVarRecursive(desc.Input("BiasQK").front());
const auto biasqk_shape = biasqk_desc->GetShape();
const auto bias_qk_shape = bias_qk_desc->GetShape();
// The BiasQK's shape requires to be
// [batch, 1, 1, length] or [batch, head, length, length].
bool has_same_shape = head_number == biasqk_shape[1] &&
input_shape[1] == biasqk_shape[2] &&
input_shape[1] == biasqk_shape[3];
bool is_broadcastable = biasqk_shape[1] == 1 && biasqk_shape[2] == 1 &&
input_shape[1] == biasqk_shape[3];
bool has_same_shape = head_number == bias_qk_shape[1] &&
input_shape[1] == bias_qk_shape[2] &&
input_shape[1] == bias_qk_shape[3];
bool is_broadcastable = bias_qk_shape[1] == 1 &&
bias_qk_shape[2] == 1 &&
input_shape[1] == bias_qk_shape[3];
if (!(has_same_shape || is_broadcastable)) {
VLOG(3) << "The BiasQK's shape is invalid, expect [" << input_shape[0]
<< ", 1, 1, " << input_shape[1] << "] or [" << input_shape[0]
<< ", " << head_number << ", " << input_shape[1] << ", "
<< input_shape[1] << "] but [" << biasqk_shape[0] << ", "
<< biasqk_shape[1] << ", " << biasqk_shape[2] << ", "
<< biasqk_shape[3] << "].";
<< input_shape[1] << "] but [" << bias_qk_shape[0] << ", "
<< bias_qk_shape[1] << ", " << bias_qk_shape[2] << ", "
<< bias_qk_shape[3] << "].";
return false;
}
} else {
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/op_teller.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ namespace tensorrt {

/*
* Single Op teller definition.
* One can override this and define a more complex tell logic, considerring more
* One can override this and define a more complex tell logic, considering more
* issues such as op_desc.
*/
struct Teller {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ paddle::any PluginArgumentMappingContext::Attr(
break;
};
default: {
LOG(ERROR) << "Can't conver op's attribute [" << attr_name
LOG(ERROR) << "Can't convert op's attribute [" << attr_name
<< "] to paddle any.";
}
}
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/inference/tensorrt/test_arg_mapping_context.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ namespace paddle {
namespace inference {
namespace tensorrt {

TEST(ArgMappingContexTest, BasicFunction) {
TEST(ArgMappingContextTest, BasicFunction) {
paddle::framework::proto::OpDesc op;
op.set_type("imaged_op");
auto *input_var = op.add_inputs();
Expand Down Expand Up @@ -86,8 +86,8 @@ TEST(ArgMappingContexTest, BasicFunction) {
int int_attr = any_cast<int>(context.Attr("int_attr"));
EXPECT_EQ(int_attr, 1);

float flaot_attr = any_cast<float>(context.Attr("float_attr"));
EXPECT_EQ(flaot_attr, 1);
float float_attr = any_cast<float>(context.Attr("float_attr"));
EXPECT_EQ(float_attr, 1);

std::string string_attr = any_cast<std::string>(context.Attr("string_attr"));
EXPECT_EQ(string_attr, "1");
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/trt_int8_calibrator.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ class TRTCalibratorEngine {
std::unique_ptr<TensorRTEngine> engine_;
};
/*
* Manager to control the TensorRT Int8 calibration creation and deltetion.
* Manager to control the TensorRT Int8 calibration creation and deletion.
*/
class TRTCalibratorEngineManager {
public:
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/utils/shape_range_info.proto
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ syntax = "proto2";
package paddle.inference.proto;

// To support trt dynamic shape, record the runtime shape
// information of all tmp tensors in the Compution graph.
// information of all tmp tensors in the Computation graph.
message ShapeRangeInfos {
message ShapeRangeInfo {
required string name = 1;
Expand Down
10 changes: 5 additions & 5 deletions paddle/fluid/inference/utils/table_printer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -57,18 +57,18 @@ std::string TablePrinter::PrintTable() {
}

TablePrinter::TablePrinter(const std::vector<std::string>& header) {
size_t terminal_witdh = 500;
size_t terminal_width = 500;
#ifdef _WIN32
CONSOLE_SCREEN_BUFFER_INFO csbi;
int ret = GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
if (ret && (csbi.dwSize.X != 0)) {
terminal_witdh = csbi.dwSize.X;
terminal_width = csbi.dwSize.X;
}
#else
struct winsize terminal_size;
int status = ioctl(STDOUT_FILENO, TIOCGWINSZ, &terminal_size);
if (status == 0 && terminal_size.ws_col != 0) {
terminal_witdh = terminal_size.ws_col;
terminal_width = terminal_size.ws_col;
}
#endif

Expand All @@ -77,8 +77,8 @@ TablePrinter::TablePrinter(const std::vector<std::string>& header) {
widths_.emplace_back(0);
}

terminal_witdh = terminal_witdh - (2 * num_cols) - (num_cols + 1);
int avg_width = static_cast<int>(terminal_witdh / num_cols); // NOLINT
terminal_width = terminal_width - (2 * num_cols) - (num_cols + 1);
int avg_width = static_cast<int>(terminal_width / num_cols); // NOLINT

for (size_t i = 0; i < num_cols; ++i) {
shares_.emplace_back(avg_width);
Expand Down
16 changes: 8 additions & 8 deletions paddle/fluid/ir_adaptor/translator/op_compat_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def to_phi_and_fluid_op_name(op_item):
op_compat_infos = yaml.safe_load(f)
op_name_mappings: Dict[str, str] = {}
op_arg_name_mappings: Dict[str, Dict[str, str]] = {}
op_mutable_attribues: Dict[str, Set[str]] = {}
op_mutable_attributes: Dict[str, Set[str]] = {}
op_mutable_attribute_infos: Dict[str, Dict[str, List[str]]] = {}

for op_compat_item in op_compat_infos:
Expand All @@ -70,15 +70,15 @@ def insert_new_arg_mappings(op_name: str, arg_mapping: Dict[str, str]):
def insert_new_mutable_attributes(
op_name: str, mutable_attribute_infos: Dict[str, Dict[str, str]]
):
if op_name not in op_mutable_attribues:
op_mutable_attribues[op_name] = set()
if op_name not in op_mutable_attributes:
op_mutable_attributes[op_name] = set()
if op_name not in op_mutable_attribute_infos:
op_mutable_attribute_infos[op_name] = {}
for (
attribute_name,
mutable_attribute_info,
) in mutable_attribute_infos.items():
op_mutable_attribues[op_name].add(attribute_name)
op_mutable_attributes[op_name].add(attribute_name)
op_mutable_attribute_infos[op_name][attribute_name] = []
for k, v in mutable_attribute_info.items():
if k == 'tensor_name' or k == 'tensors_name':
Expand Down Expand Up @@ -168,12 +168,12 @@ def insert_new_mutable_attributes(
{"out_grad_in": "Out@GRAD", "out_grad_out": "Out@GRAD"}
)

op_name_normailzer_template = env.get_template("op_compat_info.cc.j2")
op_name_normalizer_template = env.get_template("op_compat_info.cc.j2")
with open(output_source_file, 'wt') as f:
op_compat_definition = op_name_normailzer_template.render(
op_compat_definition = op_name_normalizer_template.render(
op_name_pairs=op_name_mappings,
op_arg_name_pairs=op_arg_name_mappings,
op_mutable_attributes=op_mutable_attribues,
op_mutable_attributes=op_mutable_attributes,
op_mutable_attribute_infos=op_mutable_attribute_infos,
)
f.write(op_compat_definition)
Expand All @@ -184,7 +184,7 @@ def insert_new_mutable_attributes(
# =====================================
def ParseArguments():
parser = argparse.ArgumentParser(
description='Generate OP Compatiable info Files By Yaml'
description='Generate OP Compatible info Files By Yaml'
)
parser.add_argument('--op_compat_yaml_file', type=str)
parser.add_argument('--output_source_file', type=str)
Expand Down

0 comments on commit b8b08b7

Please sign in to comment.