[clang-tidy] NO.8 enable cppcoreguidelines-narrowing-conversions. step:1 (#56218)
gouzil authored Aug 29, 2023
1 parent 0236771 commit b702d2a
Showing 113 changed files with 903 additions and 751 deletions.
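Most of the edits below follow one pattern: a conversion that the newly enabled cppcoreguidelines-narrowing-conversions check flags as narrowing (size_t or int64_t into int, an integer into double, or back) is made explicit with static_cast. A rough, illustrative sketch of the warning and the fix; the function and variable names here are made up, not taken from this patch:

#include <vector>

// Implicit size_t -> int narrowing is what the check reports:
//   int n = values.size();  // warning: narrowing conversion from 'size_t' to 'int'
// The changes in this commit make the narrowing explicit instead:
int CountValues(const std::vector<int>& values) {
  return static_cast<int>(values.size());
}

// A single occurrence could also be suppressed in place with
//   // NOLINT(cppcoreguidelines-narrowing-conversions)
// but the diffs below use explicit casts.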
6 changes: 3 additions & 3 deletions paddle/phi/api/profiler/device_tracer.cc
@@ -797,8 +797,8 @@ void ClearCurAnnotation() {
if (!main_thread_annotation_stack.empty()) {
std::string name = annotation_stack.back()->name();
std::string main_name = main_thread_annotation_stack.back()->name();
- int main_name_len = main_name.length();
- int name_len = name.length();
+ int main_name_len = static_cast<int>(main_name.length());
+ int name_len = static_cast<int>(name.length());
int prefix_len = main_name_len - name_len;

if ((prefix_len > 0 && main_name.at(prefix_len - 1) == '/' &&
@@ -825,7 +825,7 @@ void SetCurBlock(int block_id) { block_id_stack.push_back(block_id); }

void ClearCurBlock() { block_id_stack.pop_back(); }

- int BlockDepth() { return block_id_stack.size(); }
+ int BlockDepth() { return static_cast<int>(block_id_stack.size()); }

uint32_t GetCurSystemThreadId() {
std::stringstream ss;
2 changes: 1 addition & 1 deletion paddle/phi/api/profiler/event.h
@@ -78,7 +78,7 @@ class Event {
Event *parent_{nullptr};
uint64_t thread_id_;
EventRole role_{};
- int64_t cpu_ns_;
+ uint64_t cpu_ns_;
bool visited_status_{false};
std::string attr_;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
2 changes: 1 addition & 1 deletion paddle/phi/api/profiler/profiler.cc
@@ -72,7 +72,7 @@ Event::Event(EventType type,
const EventType &Event::type() const { return type_; }

double Event::CpuElapsedMs(const Event &e) const {
- return (e.cpu_ns_ - cpu_ns_) / (1000000.0);
+ return (static_cast<double>(e.cpu_ns_ - cpu_ns_)) / (1000000.0);
}

double Event::CudaElapsedMs(const Event &e) const {
2 changes: 1 addition & 1 deletion paddle/phi/api/profiler/profiler.proto
@@ -29,7 +29,7 @@ message Event {
optional uint64 end_ns = 3;
// When positive, it represents gpu id. When -1, it represents CPU.
optional int64 device_id = 5;
- optional int64 sub_device_id = 6;
+ optional uint64 sub_device_id = 6;

optional MemCopy memcopy = 7;
optional string detail_info = 9;
6 changes: 4 additions & 2 deletions paddle/phi/backends/cpu/cpu_info.cc
@@ -78,7 +78,8 @@ size_t CpuTotalPhysicalMemory() {
size_t CpuMaxAllocSize() {
// For distributed systems, it requires configuring and limiting
// the fraction of memory to use.
- return FLAGS_fraction_of_cpu_memory_to_use * CpuTotalPhysicalMemory();
+ return static_cast<size_t>(FLAGS_fraction_of_cpu_memory_to_use *
+ static_cast<double>(CpuTotalPhysicalMemory()));
}

size_t CpuMaxChunkSize() {
@@ -97,7 +98,8 @@ size_t CpuMinChunkSize() {
size_t CUDAPinnedMaxAllocSize() {
// For distributed systems, it requires configuring and limiting
// the fraction of memory to use.
- return FLAGS_fraction_of_cuda_pinned_memory_to_use * CpuTotalPhysicalMemory();
+ return static_cast<size_t>(FLAGS_fraction_of_cuda_pinned_memory_to_use *
+ static_cast<double>(CpuTotalPhysicalMemory()));
}

size_t CUDAPinnedMinChunkSize() {
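The two allocator limits above multiply a double fraction flag by a size_t byte count, so the patch spells out both directions: the byte count is widened to double for the multiply and the product is narrowed back to size_t. A standalone sketch of the same shape, with placeholder names rather than Paddle's flags:

#include <cstddef>

// fraction is e.g. 0.5; total_bytes is the machine's physical memory in bytes.
size_t MaxAllocBytes(double fraction, size_t total_bytes) {
  // Both size_t -> double and double -> size_t can lose information,
  // so each step is written with an explicit static_cast.
  return static_cast<size_t>(fraction * static_cast<double>(total_bytes));
}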
2 changes: 1 addition & 1 deletion paddle/phi/backends/device_manager.cc
@@ -491,7 +491,7 @@ std::vector<size_t> DeviceManager::GetSelectedDeviceList(
device_list.push_back(atoi(id.c_str()));
}
} else {
- int count = DeviceManager::GetDeviceCount(device_type);
+ int count = static_cast<int>(DeviceManager::GetDeviceCount(device_type));
for (int i = 0; i < count; ++i) {
device_list.push_back(i);
}
16 changes: 8 additions & 8 deletions paddle/phi/core/ddim.cc
@@ -19,15 +19,15 @@
namespace phi {

DDim make_ddim(std::initializer_list<int64_t> dims) {
- return DDim(dims.begin(), dims.size());
+ return DDim(dims.begin(), static_cast<int>(dims.size()));
}

DDim make_ddim(const std::vector<int64_t>& dims) {
- return DDim(dims.data(), dims.size());
+ return DDim(dims.data(), static_cast<int>(dims.size()));
}

DDim make_ddim(const std::vector<int>& dims) {
- return DDim(dims.data(), dims.size());
+ return DDim(dims.data(), static_cast<int>(dims.size()));
}

struct DDimEqualityVisitor {
@@ -186,19 +186,19 @@ DDim stride_numel(const DDim& ddim) {
DDim DDim::reshape(std::vector<int>& shape) const {
const DDim& in_dims = *this;

- for (uint64_t i = 0; i < shape.size(); ++i) {
+ for (int i = 0; i < static_cast<int>(shape.size()); ++i) {
if (shape[i] == 0) {
- shape[i] = in_dims.at(i);
+ shape[i] = static_cast<int>(in_dims.at(i));
}
}

// Dim marked as "-1" must be inferred
auto it = std::find(shape.begin(), shape.end(), -1);
if (it != shape.end()) {
- int index = std::distance(shape.begin(), it);
+ int index = static_cast<int>(std::distance(shape.begin(), it));
int reshape_out_product =
std::accumulate(shape.begin(), shape.end(), -1, std::multiplies<int>());
- shape[index] = product(in_dims) / reshape_out_product;
+ shape[index] = static_cast<int>(product(in_dims)) / reshape_out_product;
}

return phi::make_ddim(shape);
@@ -208,7 +208,7 @@ DDim DDim::transpose(const std::vector<int>& axis) const {
const DDim& in_dims = *this;

DDim out_dims(in_dims);
- for (size_t i = 0; i < axis.size(); i++) {
+ for (int i = 0; i < static_cast<int>(axis.size()); i++) {
out_dims[i] = in_dims[axis[i]];
}
return out_dims;
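The loop changes in this file, and in the proto-loading loops further down, switch the index to int and cast the container size once in the loop bound, so the comparison no longer mixes signed and unsigned widths on every iteration. A small sketch of the idiom, assuming (as the original code does) that the element count fits in an int:

#include <cstdint>
#include <vector>

void CopyDims(const std::vector<std::int64_t>& in, std::vector<int>* out) {
  out->resize(in.size());
  // Cast the bound once; each element is then narrowed explicitly as well.
  for (int i = 0; i < static_cast<int>(in.size()); ++i) {
    (*out)[i] = static_cast<int>(in[i]);
  }
}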
4 changes: 2 additions & 2 deletions paddle/phi/core/dense_tensor_impl.cc
@@ -340,7 +340,7 @@ std::vector<DenseTensor> DenseTensor::Split(int64_t split_size,
"split expects split_size be non-negative, but got split_size is %d",
split_size));

- int64_t numel_size = meta_.dims[axis];
+ int64_t numel_size = meta_.dims[static_cast<int>(axis)];

int64_t num_splits = 1;
if (split_size != 0) {
@@ -371,7 +371,7 @@ std::vector<DenseTensor> DenseTensor::Chunk(int64_t chunks,
phi::errors::OutOfRange(
"chunks expects to be greater than 0, but got chunks is %d", chunks));

- int64_t numel_size = meta_.dims[axis];
+ int64_t numel_size = meta_.dims[static_cast<int>(axis)];
int64_t split_size = (numel_size + chunks - 1) / chunks;
return Split(split_size, axis);
}
10 changes: 5 additions & 5 deletions paddle/phi/core/distributed/auto_parallel/device_mesh.cc
@@ -330,25 +330,25 @@ DeviceMesh DeviceMesh::from_proto(const DeviceMeshProto &proto) {
mesh.name_ = proto.name();

mesh.shape_.resize(proto.shape_size());
- for (int64_t i = 0; i < proto.shape_size(); ++i) {
+ for (int i = 0; i < proto.shape_size(); ++i) {
mesh.shape_[i] = proto.shape(i);
}

mesh.device_ids_.resize(proto.device_ids_size());
- for (int64_t i = 0; i < proto.device_ids_size(); ++i) {
+ for (int i = 0; i < proto.device_ids_size(); ++i) {
mesh.device_ids_[i] = proto.device_ids(i);
}

mesh.dim_names_.resize(proto.dim_names_size());
- for (int64_t i = 0; i < proto.dim_names_size(); ++i) {
+ for (int i = 0; i < proto.dim_names_size(); ++i) {
mesh.dim_names_[i] = proto.dim_names(i);
}

- for (int64_t i = 0; i < proto.devices_size(); ++i) {
+ for (int i = 0; i < proto.devices_size(); ++i) {
mesh.add_device(Device::from_proto(proto.devices(i)));
}

- for (int64_t i = 0; i < proto.links_size(); ++i) {
+ for (int i = 0; i < proto.links_size(); ++i) {
mesh.add_link(Link::from_proto(proto.links(i)));
}

4 changes: 2 additions & 2 deletions paddle/phi/core/distributed/auto_parallel/device_mesh.h
@@ -96,8 +96,8 @@ inline bool operator!=(const Device& lhs, const Device& rhs) {
}

struct LinkCapability {
- double bandwidth = 0.0; // Bytes/s
- double latency = 0.0;
+ int64_t bandwidth = 0.0; // Bytes/s
+ int64_t latency = 0.0;

// LinkCapability from_string(const std::string& str);
std::string to_string() const;
6 changes: 3 additions & 3 deletions paddle/phi/core/distributed/auto_parallel/dist_attr.cc
@@ -186,7 +186,7 @@ bool TensorDistAttr::verify_dims_mapping(
bool TensorDistAttr::verify_batch_dim(
int64_t dim, const std::vector<int64_t>& tensor_shape) const {
VLOG(4) << "[TensorDistAttr verify_batch_dim] " << dim;
- int64_t ndim = tensor_shape.size();
+ int64_t ndim = static_cast<int64_t>(tensor_shape.size());
if (ndim > 0) {
if (dim < 0) {
dim = dim + ndim;
@@ -270,12 +270,12 @@ std::string TensorDistAttr::to_string() const {
void TensorDistAttr::from_proto(const TensorDistAttrProto& proto) {
process_mesh_ = ProcessMesh::from_proto(proto.process_mesh());
dims_mapping_.resize(proto.dims_mapping_size());
- for (int64_t i = 0; i < proto.dims_mapping_size(); ++i) {
+ for (int i = 0; i < proto.dims_mapping_size(); ++i) {
dims_mapping_[i] = proto.dims_mapping(i);
}
batch_dim_ = proto.batch_dim();
dynamic_dims_.resize(proto.dynamic_dims_size());
- for (int64_t i = 0; i < proto.dynamic_dims_size(); ++i) {
+ for (int i = 0; i < proto.dynamic_dims_size(); ++i) {
dynamic_dims_[i] = proto.dynamic_dims(i);
}
}
8 changes: 4 additions & 4 deletions paddle/phi/core/distributed/auto_parallel/dist_mapper.cc
@@ -72,17 +72,17 @@ void DistributedMapper::set_process_id_to_device_ids(
DistributedMapper DistributedMapper::from_proto(
const DistributedMapperProto& proto) {
DistributedMapper dist_mapper;
- for (int64_t i = 0; i < proto.device_meshes_size(); ++i) {
+ for (int i = 0; i < proto.device_meshes_size(); ++i) {
dist_mapper.device_meshes_[proto.device_meshes(i).name()] =
DeviceMesh::from_proto(proto.device_meshes(i));
}
- for (int64_t i = 0; i < proto.process_id_to_device_ids_size(); ++i) {
+ for (int i = 0; i < proto.process_id_to_device_ids_size(); ++i) {
int64_t process_id = proto.process_id_to_device_ids(i).process_id();
std::string device_mesh_name =
proto.process_id_to_device_ids(i).device_mesh_name();
std::vector<int64_t> device_ids;
- int64_t num_devices = proto.process_id_to_device_ids(i).device_ids_size();
- for (int64_t j = 0; j < num_devices; ++j) {
+ int num_devices = proto.process_id_to_device_ids(i).device_ids_size();
+ for (int j = 0; j < num_devices; ++j) {
device_ids.push_back(proto.process_id_to_device_ids(i).device_ids(j));
}
dist_mapper.process_id_to_device_ids_[process_id].first = device_mesh_name;
6 changes: 3 additions & 3 deletions paddle/phi/core/distributed/auto_parallel/process_mesh.cc
@@ -88,17 +88,17 @@ ProcessMesh ProcessMesh::from_proto(const ProcessMeshProto &proto) {
ProcessMesh mesh;

mesh.shape_.resize(proto.shape_size());
- for (int64_t i = 0; i < proto.shape_size(); ++i) {
+ for (int i = 0; i < proto.shape_size(); ++i) {
mesh.shape_[i] = proto.shape(i);
}

mesh.process_ids_.resize(proto.process_ids_size());
- for (int64_t i = 0; i < proto.process_ids_size(); ++i) {
+ for (int i = 0; i < proto.process_ids_size(); ++i) {
mesh.process_ids_[i] = proto.process_ids(i);
}

mesh.dim_names_.resize(proto.dim_names_size());
- for (int64_t i = 0; i < proto.dim_names_size(); ++i) {
+ for (int i = 0; i < proto.dim_names_size(); ++i) {
mesh.dim_names_[i] = proto.dim_names(i);
}

4 changes: 2 additions & 2 deletions paddle/phi/core/distributed/auto_parallel/reshard_utils.cc
@@ -46,7 +46,7 @@ bool IsDimsMappingReplicated(const std::vector<int64_t>& dims_mapping) {
std::vector<int64_t> GetCurRankCoordInMesh(const ProcessMesh& process_mesh) {
const auto& process_shape = process_mesh.shape();
const auto& process_ids = process_mesh.process_ids();
- int64_t ndims_mesh = process_shape.size();
+ int64_t ndims_mesh = static_cast<int64_t>(process_shape.size());
int64_t cur_global_rank = GetCurGlobalRank();

VLOG(3) << "Searching current global rank " << cur_global_rank
@@ -162,7 +162,7 @@ CommContext* CreateOrGetCommContext(const DeviceContext& dev_ctx,
std::string unique_comm_key = GenUniqueCommKey(process_ids);

if (!CommContextManager::GetInstance().Has(unique_comm_key)) {
- int64_t world_size = process_ids.size();
+ int64_t world_size = static_cast<int64_t>(process_ids.size());
int64_t rank = GetLocalRankInParticipate(process_ids);
VLOG(3) << "local world size: " << world_size << " local rank: " << rank;

8 changes: 5 additions & 3 deletions paddle/phi/core/distributed/store/tcp_store.cc
@@ -172,7 +172,7 @@ void MasterDaemon::ProcessCommands(std::vector<struct pollfd>* p_fds) {
for (size_t i = 1; i < fds.size(); i++) {
#else
// 0: listen socket, 1:controller pipe, so loop from 2.
- for (size_t i = 2; i < fds.size(); i++) {
+ for (uint i = 2; i < fds.size(); i++) {
#endif
try {
if (fds[i].revents == 0) {
@@ -345,14 +345,16 @@ TCPStore::TCPStore(std::string host,
bool is_master,
size_t num_workers,
int timeout)
- : Store(timeout), _is_master(is_master), _num_workers(num_workers) {
+ : Store(timeout),
+   _is_master(is_master),
+   _num_workers(static_cast<int>(num_workers)) {
_timeout = timeout;
PADDLE_ENFORCE_GT(
timeout, 0, phi::errors::InvalidArgument("timeout must >= %d", timeout));

VLOG(3) << "input timeout" << timeout << ", member timeout:" << _timeout;
if (_is_master) {
- _server = detail::TCPServer::create(port, num_workers, timeout);
+ _server = detail::TCPServer::create(port, this->_num_workers, timeout);
}

_client = detail::TCPClient::connect(host, port);
2 changes: 1 addition & 1 deletion paddle/phi/core/generator.cc
@@ -205,7 +205,7 @@ Generator::Generator(uint64_t seed, uint64_t device_id) {
std::seed_seq seq({seed});
auto engine = std::make_shared<std::mt19937_64>(seq);
this->state_.cpu_engine = *engine;
- this->state_.device = device_id;
+ this->state_.device = static_cast<int64_t>(device_id);
this->state_.current_seed = seed;
this->state_.thread_offset = 0;
this->engine_ = engine;
8 changes: 4 additions & 4 deletions paddle/phi/core/infermeta_utils.cc
@@ -21,12 +21,12 @@ void InferMetaContext::SetMetaConfig(MetaConfig config) {
}

void InferMetaContext::EmplaceBackInput(MetaTensor input) {
- int index = inputs_.size();
+ int index = static_cast<int>(inputs_.size());
inputs_.emplace_back(std::move(input));
input_range_.emplace_back(std::pair<int, int>(index, index + 1));
}
void InferMetaContext::EmplaceBackOutput(MetaTensor output) {
- int index = outputs_.size();
+ int index = static_cast<int>(outputs_.size());
outputs_.emplace_back(std::move(output));
output_range_.emplace_back(std::pair<int, int>(index, index + 1));
}
@@ -36,15 +36,15 @@ void InferMetaContext::EmplaceBackAttr(Attribute attr) {

void InferMetaContext::EmplaceBackInputs(
paddle::small_vector<MetaTensor, phi::kInputSmallVectorSize> inputs) {
- int index = inputs_.size();
+ int index = static_cast<int>(inputs_.size());
input_range_.emplace_back(std::pair<int, int>(index, index + inputs.size()));
inputs_.insert(inputs_.end(),
std::make_move_iterator(inputs.begin()),
std::make_move_iterator(inputs.end()));
}
void InferMetaContext::EmplaceBackOutputs(
paddle::small_vector<MetaTensor, phi::kOutputSmallVectorSize> outputs) {
- int index = outputs_.size();
+ int index = static_cast<int>(outputs_.size());
output_range_.emplace_back(
std::pair<int, int>(index, index + outputs.size()));
outputs_.insert(outputs_.end(),
8 changes: 4 additions & 4 deletions paddle/phi/core/kernel_context.cc
@@ -17,7 +17,7 @@
namespace phi {

void KernelContext::EmplaceBackInput(const TensorBase* input) {
- int index = inputs_.size();
+ int index = static_cast<int>(inputs_.size());
inputs_.emplace_back(input);
// Record the start and end index of the input
input_range_.emplace_back(std::pair<int, int>(index, index + 1));
@@ -29,7 +29,7 @@ void KernelContext::EmplaceBackInputWithoutSetRange(const TensorBase* input) {

void KernelContext::EmplaceBackInputs(
paddle::small_vector<const TensorBase*> inputs) {
- int index = inputs_.size();
+ int index = static_cast<int>(inputs_.size());
// Record the start and end index of the input
input_range_.emplace_back(std::pair<int, int>(index, index + inputs.size()));
inputs_.insert(inputs_.end(),
@@ -45,7 +45,7 @@ void KernelContext::EmplaceBackInputsWithoutSetRange(
}

void KernelContext::EmplaceBackOutput(TensorBase* output) {
- int index = outputs_.size();
+ int index = static_cast<int>(outputs_.size());
outputs_.emplace_back(output);
// Record the start and end index of the input
output_range_.emplace_back(std::pair<int, int>(index, index + 1));
@@ -57,7 +57,7 @@ void KernelContext::EmplaceBackOutputWithoutSetRange(TensorBase* output) {

void KernelContext::EmplaceBackOutputs(
paddle::small_vector<TensorBase*> outputs) {
- int index = outputs_.size();
+ int index = static_cast<int>(outputs_.size());
// Record the start and end index of the input
output_range_.emplace_back(
std::pair<int, int>(index, index + outputs.size()));
4 changes: 2 additions & 2 deletions paddle/phi/core/selected_rows_impl.cc
@@ -136,7 +136,7 @@ int64_t SelectedRowsImpl::AutoGrownIndex(int64_t key,
}
auto write_iter = id_to_index_.find(key);
if (write_iter == id_to_index_.end()) {
- int row_num = rows_.size();
+ int row_num = static_cast<int>(rows_.size());
if (row_num == value_->dims()[0]) {
rwlock_->UNLock();
PADDLE_THROW(phi::errors::InvalidArgument(
@@ -165,7 +165,7 @@ void SelectedRowsImpl::SyncIndex() {
void SelectedRowsImpl::SyncIndex() {
rwlock_->WRLock();
id_to_index_.clear();
- for (size_t i = 0; i < rows_.size(); ++i) {
+ for (int i = 0; i < static_cast<int>(rows_.size()); ++i) {
id_to_index_[rows_[i]] = i;
}
rwlock_->UNLock();