Commit dcaca0f

[clang-tidy] enable bugprone-exception-escape check (#56692)
Parent: c0f5dac

24 files changed: +53 -45 lines
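
For background, bugprone-exception-escape reports functions that must not throw (destructors, move constructors, move assignment operators, main(), swap functions, and anything declared noexcept) whose bodies contain potentially throwing calls. A minimal sketch of what it flags, using a hypothetical Holder class rather than anything from this diff:

#include <vector>

struct Holder {
  std::vector<int> items;

  // Destructors are implicitly noexcept in C++11. resize() may throw
  // std::bad_alloc, and an exception escaping a noexcept function calls
  // std::terminate, so bugprone-exception-escape warns on this destructor.
  ~Holder() { items.resize(1024); }
};

int main() { Holder h; }  // the warning points at ~Holder, not this use site

Rather than restructuring each flagged function, this commit keeps behavior identical and appends // NOLINT to the lines where the check reports, as the hunks below show.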

.clang-tidy (+1 -1)

@@ -8,7 +8,7 @@ bugprone-argument-comment,
 bugprone-copy-constructor-init,
 -bugprone-dangling-handle,
 -bugprone-dynamic-static-initializers,
--bugprone-exception-escape,
+bugprone-exception-escape,
 -bugprone-fold-init-type,
 -bugprone-forwarding-reference-overload,
 -bugprone-inaccurate-erase,
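
In a .clang-tidy Checks list, an entry with a leading dash is a disabled glob, so dropping the dash from -bugprone-exception-escape, is all it takes to enable the check repository-wide; the surrounding disabled entries (-bugprone-dangling-handle, and friends) are untouched context lines. The // NOLINT edits below then silence the pre-existing violations the newly enabled check finds.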

paddle/fluid/distributed/auto_parallel/spmd_rules/dim_trans.cc (+1 -1)

@@ -72,7 +72,7 @@ Flatten::Flatten(const std::vector<DimTrans*>& dims)
   all_dim_trans.emplace_back(this);
 }

-Flatten::~Flatten() {
+Flatten::~Flatten() {  // NOLINT
   input_dims_.assign(input_dims_.size(), nullptr);
   std::vector<DimTrans*>().swap(input_dims_);
 }
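
This hunk sets the pattern for most of the commit: the destructor is implicitly noexcept, the vector assign/swap calls inside it are not provably non-throwing, so the newly enabled check fires, and a trailing // NOLINT suppresses the diagnostic on that single line without changing behavior.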

paddle/fluid/eager/auto_code_generator/eager_generator.cc (+1 -1)

@@ -3299,7 +3299,7 @@ static void DygraphCodeGeneration(const std::string& output_dir,
 }  // namespace framework
 }  // namespace paddle

-int main(int argc, char* argv[]) {
+int main(int argc, char* argv[]) {  // NOLINT
   if (argc != 3) {
     std::cerr << "argc must be 3" << std::endl;
     return -1;
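
main() is also in the check's default scope, since an exception escaping main() terminates the program with no guarantee of stack unwinding; the code generators' main functions get the same one-line suppression (as do kernel_signature_generator.cc and paddle_gtest_main.cc further down).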

paddle/fluid/eager/pylayer/py_layer_node.cc (+1 -1)

@@ -27,7 +27,7 @@
 #include "pybind11/pytypes.h"

 namespace egr {
-GradNodePyLayer::~GradNodePyLayer() {
+GradNodePyLayer::~GradNodePyLayer() {  // NOLINT
   pybind11::gil_scoped_acquire gil;
   Py_XDECREF(ctx_);
 }

paddle/fluid/framework/custom_operator.cc (+27 -20)

@@ -916,11 +916,12 @@ static void RegisterOperatorKernel(
   OperatorWithKernel::OpKernelFunc op_kernel_func;
   if (kernel_func) {
     VLOG(3) << "Register custom operator " << name << " with kernel func";
-    op_kernel_func = [kernel_func, inputs, outputs, attrs, inplace_map](
-                         const framework::ExecutionContext& ctx) {
-      VLOG(3) << "Custom Operator: run custom kernel func in lambda.";
-      RunKernelFunc(ctx, kernel_func, inputs, outputs, attrs, inplace_map);
-    };
+    op_kernel_func =
+        [kernel_func, inputs, outputs, attrs, inplace_map](  // NOLINT
+            const framework::ExecutionContext& ctx) {
+          VLOG(3) << "Custom Operator: run custom kernel func in lambda.";
+          RunKernelFunc(ctx, kernel_func, inputs, outputs, attrs, inplace_map);
+        };
   } else {
     VLOG(3) << "Register custom operator " << name
             << " with raw op kernel func";
@@ -1027,12 +1028,12 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // InferShape
   if (infer_shape_func == nullptr) {
     // use default InferShape
-    info.infer_shape_ =
-        [op_inputs, op_outputs, op_inplace_map](InferShapeContext* ctx) {
-          RunDefaultInferShapeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
-        };
+    info.infer_shape_ = [op_inputs, op_outputs, op_inplace_map](  // NOLINT
+                            InferShapeContext* ctx) {
+      RunDefaultInferShapeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
+    };
   } else {
-    info.infer_shape_ = [op_inputs,
+    info.infer_shape_ = [op_inputs,  // NOLINT
                          op_outputs,
                          op_attrs,
                          op_inplace_map,
@@ -1051,12 +1052,12 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Infer Dtype
   if (infer_dtype_func == nullptr) {
     // use default InferDtype
-    info.infer_var_type_ =
-        [op_inputs, op_outputs, op_inplace_map](InferVarTypeContext* ctx) {
-          RunDefaultInferDtypeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
-        };
+    info.infer_var_type_ = [op_inputs, op_outputs, op_inplace_map](  // NOLINT
+                               InferVarTypeContext* ctx) {
+      RunDefaultInferDtypeFunc(ctx, op_inputs, op_outputs, op_inplace_map);
+    };
   } else {
-    info.infer_var_type_ = [op_inputs,
+    info.infer_var_type_ = [op_inputs,  // NOLINT
                             op_outputs,
                             op_attrs,
                             op_inplace_map,
@@ -1115,7 +1116,10 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,

   // GradOpDescMaker
   info.grad_op_maker_ =
-      [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
+      [grad_op_name,  // NOLINT
+       grad_op_inputs,
+       grad_op_outputs,
+       is_double_grad](
           const OpDesc& fwd_op,
           const std::unordered_set<std::string>& no_grad_set,
           std::unordered_map<std::string, std::string>* grad_to_var,
@@ -1133,7 +1137,10 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,

   // GradOpBaseMaker
   info.dygraph_grad_op_maker_ =
-      [grad_op_name, grad_op_inputs, grad_op_outputs, is_double_grad](
+      [grad_op_name,  // NOLINT
+       grad_op_inputs,
+       grad_op_outputs,
+       is_double_grad](
          const std::string& type,
          const imperative::NameVarBaseMap& var_base_map_in,
          const imperative::NameVarBaseMap& var_base_map_out,
@@ -1173,7 +1180,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,

   // Grad InferShape
   if (grad_infer_shape_fn == nullptr) {
-    grad_info.infer_shape_ = [grad_op_inputs,
+    grad_info.infer_shape_ = [grad_op_inputs,  // NOLINT
                               grad_op_outputs,
                               is_double_grad](InferShapeContext* ctx) {
       // 1. if forward input exists, gradient's shape is same with forward
@@ -1211,7 +1218,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
       }
     };
   } else {
-    grad_info.infer_shape_ = [grad_op_inputs,
+    grad_info.infer_shape_ = [grad_op_inputs,  // NOLINT
                               grad_op_outputs,
                               grad_op_attrs,
                               grad_op_inplace_map,
@@ -1230,7 +1237,7 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
   // Grad InferDtype
   if (grad_infer_dtype_fn != nullptr) {
     grad_info.infer_var_type_ =
-        [grad_op_inputs,
+        [grad_op_inputs,  // NOLINT
          grad_op_outputs,
          grad_op_attrs,
          grad_op_inplace_map,
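
The lambda rewrites in this file are formatting-only: clang-tidy appears to attach a lambda's diagnostic to the line holding its capture list, so each capture list is re-wrapped until // NOLINT can sit on the reported line. The captures, parameters, and bodies are unchanged.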

paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc (+1 -1)

@@ -72,7 +72,7 @@ class SingleStreamGuard {
     }
   }

-  ~SingleStreamGuard() {
+  ~SingleStreamGuard() {  // NOLINT
     if (!is_changed) {
       return;
     }

paddle/fluid/framework/scope.cc (+1 -1)

@@ -33,7 +33,7 @@ PADDLE_DEFINE_EXPORTED_bool(
 namespace paddle {
 namespace framework {

-Scope::~Scope() { DropKids(); }
+Scope::~Scope() { DropKids(); }  // NOLINT

 Scope& Scope::NewScope() const {
   Scope* child = new Scope(this);

paddle/fluid/inference/api/analysis_predictor.cc (+1 -1)

@@ -2581,7 +2581,7 @@ bool AnalysisPredictor::SaveTrtCalibToDisk() {
 }
 #endif

-AnalysisPredictor::~AnalysisPredictor() {
+AnalysisPredictor::~AnalysisPredictor() {  // NOLINT
 #ifdef PADDLE_WITH_TENSORRT
   if (config_.tensorrt_engine_enabled() &&
       config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 &&

paddle/fluid/memory/allocation/mmap_allocator.cc (+1 -1)

@@ -391,7 +391,7 @@ void MemoryMapAllocationPool::Clear() {
   memory_map_allocations_.clear();
 }

-MemoryMapAllocationPool::~MemoryMapAllocationPool() { Clear(); }
+MemoryMapAllocationPool::~MemoryMapAllocationPool() { Clear(); }  // NOLINT

 }  // namespace allocation
 }  // namespace memory

paddle/fluid/platform/profiler.cc (+1 -1)

@@ -591,7 +591,7 @@ MemEvenRecorder::RecordMemEvent::RecordMemEvent(const Place &place,
   PushMemEvent(start_ns_, end_ns_, bytes_, place_, alloc_in_);
 }

-MemEvenRecorder::RecordMemEvent::~RecordMemEvent() {
+MemEvenRecorder::RecordMemEvent::~RecordMemEvent() {  // NOLINT
   phi::DeviceTracer *tracer = phi::GetDeviceTracer();
   end_ns_ = PosixInNsec();

paddle/fluid/platform/profiler/chrometracing_logger.cc (+1 -1)

@@ -60,7 +60,7 @@ ChromeTracingLogger::ChromeTracingLogger(const char* filename_cstr) {
   StartLog();
 }

-ChromeTracingLogger::~ChromeTracingLogger() {
+ChromeTracingLogger::~ChromeTracingLogger() {  // NOLINT
   EndLog();
   output_file_stream_.close();
 }

paddle/fluid/platform/profiler/dump/deserialization_reader.cc (+1 -1)

@@ -150,7 +150,7 @@ std::unique_ptr<ProfilerResult> DeserializationReader::Parse() {
   return std::unique_ptr<ProfilerResult>(profiler_result_ptr);
 }

-DeserializationReader::~DeserializationReader() {
+DeserializationReader::~DeserializationReader() {  // NOLINT
   delete node_trees_proto_;
   input_file_stream_.close();
 }

paddle/fluid/platform/profiler/dump/serialization_logger.cc (+1 -1)

@@ -371,7 +371,7 @@ SerializationLogger::SerializationLogger(const char* filename_cstr) {
   OpenFile();
 }

-SerializationLogger::~SerializationLogger() {
+SerializationLogger::~SerializationLogger() {  // NOLINT
   if (!output_file_stream_) {
     delete node_trees_proto_;
     return;

paddle/fluid/pybind/eager_functions.cc (+1 -1)

@@ -106,7 +106,7 @@ class EagerNumpyAllocation : public phi::Allocation {
         "The underlying PyObject pointer of numpy array cannot be None"));
     Py_INCREF(arr_);
   }
-  ~EagerNumpyAllocation() override {
+  ~EagerNumpyAllocation() override {  // NOLINT
     py::gil_scoped_acquire gil;
     Py_DECREF(arr_);
   }

paddle/fluid/pybind/eager_legacy_op_function_generator.cc (+1 -1)

@@ -477,7 +477,7 @@ GenerateOpFunctions() {
   return std::make_tuple(op_function_list, bind_function_list);
 }

-int main(int argc, char* argv[]) {
+int main(int argc, char* argv[]) {  // NOLINT
   if (argc != 2) {
     std::cerr << "argc must be 2" << std::endl;
     return -1;

paddle/fluid/pybind/eager_utils.cc (+3 -3)

@@ -1834,7 +1834,7 @@ void PyVoidHook::operator()() {

 PyObjectHolder::PyObjectHolder(PyObject* ptr) { ptr_ = ptr; }

-PyObjectHolder::~PyObjectHolder() {
+PyObjectHolder::~PyObjectHolder() {  // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_XDECREF(ptr_);
 }
@@ -1860,7 +1860,7 @@ void PyObjectHolder::dec_ref() {

 PackHook::PackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }

-PackHook::~PackHook() {
+PackHook::~PackHook() {  // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_DECREF(hook_);
 }
@@ -1899,7 +1899,7 @@ void* PackHook::operator()(void* py_tensor) {

 UnPackHook::UnPackHook(PyObject* hook) : hook_(hook) { Py_INCREF(hook_); }

-UnPackHook::~UnPackHook() {
+UnPackHook::~UnPackHook() {  // NOLINT
   ::pybind11::gil_scoped_acquire gil;
   Py_DECREF(hook_);
 }

paddle/fluid/pybind/imperative.cc (+1 -1)

@@ -88,7 +88,7 @@ class PyVariableWrapperHook : public imperative::VariableWrapperHook {
     Py_INCREF(py_func_);
   }

-  ~PyVariableWrapperHook() override {
+  ~PyVariableWrapperHook() override {  // NOLINT
     py::gil_scoped_acquire gil;
     Py_DECREF(py_func_);
   }

paddle/fluid/pybind/kernel_signature_generator.cc (+1 -1)

@@ -40,7 +40,7 @@
 //   },
 //   ...
 // }
-int main(int argc, char **argv) {
+int main(int argc, char **argv) {  // NOLINT
   paddle::framework::InitDefaultKernelSignatureMap();
   auto &kernel_signature_map = phi::DefaultKernelSignatureMap::Instance();
   auto &kernel_factory = phi::KernelFactory::Instance();

paddle/ir/core/storage_manager.cc (+1 -1)

@@ -28,7 +28,7 @@ struct ParametricStorageManager {
   explicit ParametricStorageManager(std::function<void(StorageBase *)> destroy)
       : destroy_(destroy) {}

-  ~ParametricStorageManager() {
+  ~ParametricStorageManager() {  // NOLINT
     for (const auto &instance : parametric_instances_) {
       destroy_(instance.second);
     }

paddle/phi/core/distributed/store/tcp_store.cc (+1 -1)

@@ -43,7 +43,7 @@ MasterDaemon::MasterDaemon(SocketType socket, int nranks, int timeout)
   _background_thread = std::thread{&MasterDaemon::run, this};
 }

-MasterDaemon::~MasterDaemon() {
+MasterDaemon::~MasterDaemon() {  // NOLINT
   VLOG(4) << ("begin to destruct MasterDaemon");
   StopByControlFd();
   _background_thread.join();

paddle/phi/core/sparse_coo_tensor.cc (+1 -1)

@@ -21,7 +21,7 @@ SparseCooTensor::SparseCooTensor() {
   this->SetMember(non_zero_indices, non_zero_elements, {1}, true);
 }

-SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) {
+SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) {  // NOLINT
   this->non_zero_elements_ = other.non_zero_elements_;
   this->non_zero_indices_ = other.non_zero_indices_;
   this->coalesced_ = other.coalesced_;
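
Move constructors and move assignment operators are likewise expected not to throw under this check. The "move" operations flagged here and in string_tensor.cc and tensor_meta.cc below are implemented with member copies or assignments that are not provably noexcept, so they get the same suppression instead of a genuinely non-throwing rewrite.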

paddle/phi/core/string_tensor.cc (+1 -1)

@@ -48,7 +48,7 @@ StringTensor& StringTensor::operator=(const StringTensor& other) {
   return *this;
 }

-StringTensor& StringTensor::operator=(StringTensor&& other) {
+StringTensor& StringTensor::operator=(StringTensor&& other) {  // NOLINT
   meta_ = std::move(other.meta_);
   std::swap(holder_, other.holder_);
   return *this;

paddle/phi/core/tensor_meta.cc (+2 -1)

@@ -183,7 +183,8 @@ DenseTensorMeta& DenseTensorMeta::operator=(const DenseTensorMeta& other) {
   return *this;
 }

-DenseTensorMeta& DenseTensorMeta::operator=(DenseTensorMeta&& other) {
+DenseTensorMeta& DenseTensorMeta::operator=(  // NOLINT
+    DenseTensorMeta&& other) {
   is_scalar = other.is_scalar;
   use_gpudnn = other.use_gpudnn;
   dims = std::move(other.dims);

paddle/testing/paddle_gtest_main.cc (+1 -1)

@@ -23,7 +23,7 @@ limitations under the License. */
 DECLARE_bool(enable_gpu_memory_usage_log);
 #endif

-int main(int argc, char** argv) {
+int main(int argc, char** argv) {  // NOLINT
   paddle::memory::allocation::UseAllocatorStrategyGFlag();
   testing::InitGoogleTest(&argc, argv);
   std::vector<char*> new_argv;
