This repository was archived by the owner on Jan 22, 2025. It is now read-only.

ci: treewide: deny used_underscore_binding #31319

Merged: 7 commits, Apr 27, 2023
1 change: 1 addition & 0 deletions ci/test-checks.sh
@@ -82,6 +82,7 @@ nightly_clippy_allows=()
--deny=warnings \
--deny=clippy::default_trait_access \
--deny=clippy::integer_arithmetic \
+--deny=clippy::used_underscore_binding \
Review comment (Contributor):
Just adding a comment here with a ref to the clippy lint description: https://rust-lang.github.io/rust-clippy/master/#used_underscore_binding

"${nightly_clippy_allows[@]}"

if [[ -n $CI ]]; then
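For context, a minimal hypothetical sketch (not from this PR) of what the lint rejects: a leading underscore marks a binding as intentionally unused, so clippy::used_underscore_binding fires when such a binding is read anyway.

    fn main() {
        let _count = 42; // leading underscore: "intentionally unused"
        println!("{}", _count); // reading the binding anyway is what the lint denies
    }

The commits below fix existing offenders in a few ways: dropping the underscore where the binding really is used, deleting the binding (and anything that fed it) where it is dead, scoping clones inside block expressions, or locally #[allow]ing the lint where cfg attributes make usage conditional. Locally, the check can be approximated with something like cargo clippy -- --deny=clippy::used_underscore_binding, assuming a nightly toolchain matching CI's.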
3 changes: 0 additions & 3 deletions core/src/ancestor_hashes_service.rs
@@ -761,7 +761,6 @@ mod test {
super::*,
crate::{
cluster_slot_state_verifier::{DuplicateSlotsToRepair, PurgeRepairSlotCounter},
-repair_service::DuplicateSlotsResetReceiver,
replay_stage::{
tests::{replay_blockstore_components, ReplayBlockstoreComponents},
ReplayStage,
@@ -1039,7 +1038,6 @@
repairable_dead_slot_pool: HashSet<Slot>,
request_throttle: Vec<u64>,
repair_stats: AncestorRepairRequestsStats,
-_duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
retryable_slots_sender: RetryableSlotsSender,
retryable_slots_receiver: RetryableSlotsReceiver,
ancestor_hashes_replay_update_sender: AncestorHashesReplayUpdateSender,
@@ -1089,7 +1087,6 @@
repairable_dead_slot_pool: HashSet::new(),
request_throttle: vec![],
repair_stats: AncestorRepairRequestsStats::default(),
-_duplicate_slots_reset_receiver,
ancestor_hashes_replay_update_sender,
ancestor_hashes_replay_update_receiver,
retryable_slots_sender,
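Note the fix strategy in this file: the underscore-prefixed receiver was never actually read, so rather than renaming it, the commit deletes the struct field, its initialization, and the now-unneeded DuplicateSlotsResetReceiver import outright.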
4 changes: 2 additions & 2 deletions core/src/tvu.rs
@@ -426,7 +426,7 @@ pub mod tests {
let bank_forks = Arc::new(RwLock::new(bank_forks));
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let max_complete_rewards_slot = Arc::new(AtomicU64::default());
-let _ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
+let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
let tvu = Tvu::new(
&vote_keypair.pubkey(),
Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])),
@@ -476,7 +476,7 @@
AbsRequestSender::default(),
None,
&Arc::new(ConnectionCache::default()),
-&_ignored_prioritization_fee_cache,
+&ignored_prioritization_fee_cache,
BankingTracer::new_disabled(),
)
.expect("assume success");
15 changes: 3 additions & 12 deletions core/src/window_service.rs
@@ -240,7 +240,6 @@ where
let shred = Shred::new_from_serialized_shred(shred.to_vec()).ok()?;
if packet.meta().repair() {
let repair_info = RepairMeta {
-_from_addr: packet.meta().socket_addr(),
// If can't parse the nonce, dump the packet.
nonce: repair_response::nonce(packet)?,
};
@@ -292,7 +291,6 @@
}

struct RepairMeta {
-_from_addr: SocketAddr,
nonce: Nonce,
}

@@ -573,10 +571,7 @@ mod test {

#[test]
fn test_prune_shreds() {
-use {
-crate::serve_repair::ShredRepairType,
-std::net::{IpAddr, Ipv4Addr},
-};
+use crate::serve_repair::ShredRepairType;
solana_logger::setup();
let shred = Shred::new_from_parity_shard(
5, // slot
@@ -589,18 +584,14 @@
0, // version
);
let mut shreds = vec![shred.clone(), shred.clone(), shred];
-let _from_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080);
-let repair_meta = RepairMeta {
-_from_addr,
-nonce: 0,
-};
+let repair_meta = RepairMeta { nonce: 0 };
let outstanding_requests = Arc::new(RwLock::new(OutstandingShredRepairs::default()));
let repair_type = ShredRepairType::Orphan(9);
let nonce = outstanding_requests
.write()
.unwrap()
.add_request(repair_type, timestamp());
-let repair_meta1 = RepairMeta { _from_addr, nonce };
+let repair_meta1 = RepairMeta { nonce };
let mut repair_infos = vec![None, Some(repair_meta), Some(repair_meta1)];
prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, &outstanding_requests);
assert_eq!(repair_infos.len(), 2);
6 changes: 4 additions & 2 deletions core/tests/snapshots.rs
@@ -71,6 +71,8 @@ struct SnapshotTestConfig {
full_snapshot_archives_dir: TempDir,
bank_snapshots_dir: TempDir,
accounts_dir: PathBuf,
+// as the underscore prefix indicates, this isn't explicitly used, but it's needed to keep
+// TempDir::drop from running, retaining the directory for the duration of the test
_accounts_tmp_dir: TempDir,
}

@@ -82,7 +84,7 @@ impl SnapshotTestConfig {
full_snapshot_archive_interval_slots: Slot,
incremental_snapshot_archive_interval_slots: Slot,
) -> SnapshotTestConfig {
-let (_accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests();
+let (accounts_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests();
let bank_snapshots_dir = TempDir::new().unwrap();
let full_snapshot_archives_dir = TempDir::new().unwrap();
let incremental_snapshot_archives_dir = TempDir::new().unwrap();
@@ -128,7 +130,7 @@ impl SnapshotTestConfig {
full_snapshot_archives_dir,
bank_snapshots_dir,
accounts_dir,
-_accounts_tmp_dir,
+_accounts_tmp_dir: accounts_tmp_dir,
}
}
}
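The comment added above is worth a concrete illustration: an underscore-prefixed field still owns its value and drops it only when the struct drops, unlike a bare _ pattern, which drops immediately. A minimal sketch, assuming the tempfile crate (the struct and names here are hypothetical):

    use tempfile::TempDir;

    struct TestDirs {
        _tmp: TempDir, // owned: TempDir::drop runs only when TestDirs is dropped
    }

    fn main() -> std::io::Result<()> {
        let tmp = TempDir::new()?;
        let path = tmp.path().to_path_buf();
        let dirs = TestDirs { _tmp: tmp }; // the directory lives as long as `dirs`
        assert!(path.exists());
        drop(dirs); // TempDir::drop runs here and removes the directory
        assert!(!path.exists());
        Ok(())
    }

Because the field is never read, used_underscore_binding has nothing to flag here; the underscore prefix only silences dead-code warnings while keeping ownership.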
4 changes: 2 additions & 2 deletions faucet/src/faucet.rs
@@ -389,11 +389,11 @@ pub async fn run_faucet(
);

loop {
-let _faucet = faucet.clone();
+let faucet = faucet.clone();
match listener.accept().await {
Ok((stream, _)) => {
tokio::spawn(async move {
-if let Err(e) = process(stream, _faucet).await {
+if let Err(e) = process(stream, faucet).await {
info!("failed to process request; error = {:?}", e);
}
});
8 changes: 4 additions & 4 deletions ledger/src/blockstore_processor.rs
@@ -444,7 +444,7 @@ pub fn process_entries_for_tests(
})
.collect();

-let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
+let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
let result = process_entries(
bank,
&mut replay_entries,
@@ -453,7 +453,7 @@
replay_vote_sender,
&mut batch_timing,
None,
-&_ignored_prioritization_fee_cache,
+&ignored_prioritization_fee_cache,
);

debug!("process_entries: {:?}", batch_timing);
@@ -904,7 +904,7 @@ fn confirm_full_slot(
) -> result::Result<(), BlockstoreProcessorError> {
let mut confirmation_timing = ConfirmationTiming::default();
let skip_verification = !opts.run_verification;
-let _ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);
+let ignored_prioritization_fee_cache = PrioritizationFeeCache::new(0u64);

confirm_slot(
blockstore,
@@ -917,7 +917,7 @@
recyclers,
opts.allow_dead_slots,
opts.runtime_config.log_messages_bytes_limit,
-&_ignored_prioritization_fee_cache,
+&ignored_prioritization_fee_cache,
)?;

timing.accumulate(&confirmation_timing.batch_execute.totals);
22 changes: 11 additions & 11 deletions perf/src/cuda_runtime.rs
@@ -23,12 +23,12 @@ use {

const CUDA_SUCCESS: c_int = 0;

-fn pin<T>(_mem: &mut Vec<T>) {
+fn pin<T>(mem: &mut Vec<T>) {
if let Some(api) = perf_libs::api() {
use std::{ffi::c_void, mem::size_of};

-let ptr = _mem.as_mut_ptr();
-let size = _mem.capacity().saturating_mul(size_of::<T>());
+let ptr = mem.as_mut_ptr();
+let size = mem.capacity().saturating_mul(size_of::<T>());
let err = unsafe {
(api.cuda_host_register)(ptr as *mut c_void, size, /*flags=*/ 0)
};
@@ -39,14 +39,14 @@ fn pin<T>(_mem: &mut Vec<T>) {
}
}

-fn unpin<T>(_mem: *mut T) {
+fn unpin<T>(mem: *mut T) {
if let Some(api) = perf_libs::api() {
use std::ffi::c_void;

-let err = unsafe { (api.cuda_host_unregister)(_mem as *mut c_void) };
+let err = unsafe { (api.cuda_host_unregister)(mem as *mut c_void) };
assert!(
err == CUDA_SUCCESS,
"cudaHostUnregister returned: {err} ptr: {_mem:?}"
"cudaHostUnregister returned: {err} ptr: {mem:?}"
);
}
}
@@ -277,21 +277,21 @@ impl<T: Clone + Default + Sized> PinnedVec<T> {
self.x.shuffle(rng)
}

-fn check_ptr(&mut self, _old_ptr: *mut T, _old_capacity: usize, _from: &'static str) {
+fn check_ptr(&mut self, old_ptr: *mut T, old_capacity: usize, from: &'static str) {
let api = perf_libs::api();
if api.is_some()
&& self.pinnable
-&& (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity)
+&& (self.x.as_ptr() != old_ptr || self.x.capacity() != old_capacity)
{
if self.pinned {
-unpin(_old_ptr);
+unpin(old_ptr);
}

trace!(
"pinning from check_ptr old: {} size: {} from: {}",
-_old_capacity,
+old_capacity,
self.x.capacity(),
-_from
+from
);
pin(&mut self.x);
self.pinned = true;
8 changes: 4 additions & 4 deletions rpc-test/tests/rpc.rs
@@ -297,9 +297,9 @@ fn test_rpc_subscriptions() {
let status_sender = status_sender.clone();
let signature_subscription_ready_clone = signature_subscription_ready_clone.clone();
tokio::spawn({
-let _pubsub_client = Arc::clone(&pubsub_client);
+let pubsub_client = Arc::clone(&pubsub_client);
async move {
-let (mut sig_notifications, sig_unsubscribe) = _pubsub_client
+let (mut sig_notifications, sig_unsubscribe) = pubsub_client
.signature_subscribe(
&signature,
Some(RpcSignatureSubscribeConfig {
@@ -324,9 +324,9 @@
let account_sender = account_sender.clone();
let account_subscription_ready_clone = account_subscription_ready_clone.clone();
tokio::spawn({
-let _pubsub_client = Arc::clone(&pubsub_client);
+let pubsub_client = Arc::clone(&pubsub_client);
async move {
-let (mut account_notifications, account_unsubscribe) = _pubsub_client
+let (mut account_notifications, account_unsubscribe) = pubsub_client
.account_subscribe(
&pubkey,
Some(RpcAccountInfoConfig {
4 changes: 2 additions & 2 deletions runtime/src/accounts_index.rs
@@ -3404,10 +3404,10 @@ pub mod tests {
index.unchecked_scan_accounts(
"",
&Ancestors::default(),
-|pubkey, _index| {
+|pubkey, index| {
if pubkey == &key {
found_key = true;
-assert_eq!(_index, (&true, 3));
+assert_eq!(index, (&true, 3));
};
num += 1
},
1 change: 1 addition & 0 deletions sdk/program/src/pubkey.rs
@@ -158,6 +158,7 @@ impl TryFrom<&str> for Pubkey {
}
}

+#[allow(clippy::used_underscore_binding)]
pub fn bytes_are_curve_point<T: AsRef<[u8]>>(_bytes: T) -> bool {
#[cfg(not(target_os = "solana"))]
{
1 change: 1 addition & 0 deletions sdk/program/src/vote/state/mod.rs
@@ -341,6 +341,7 @@ impl VoteState {
3762 // see test_vote_state_size_of.
}

+#[allow(clippy::used_underscore_binding)]
pub fn deserialize(_input: &[u8]) -> Result<Self, InstructionError> {
#[cfg(not(target_os = "solana"))]
{
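The two #[allow] sites above (bytes_are_curve_point and VoteState::deserialize) share a shape that genuinely needs the escape hatch: the underscore-prefixed parameter is used on one cfg branch and deliberately unused on the other, so neither renaming nor deleting it works. A hypothetical sketch of that shape (the function name and body are invented for illustration):

    #[allow(clippy::used_underscore_binding)]
    pub fn bytes_nonempty<T: AsRef<[u8]>>(_bytes: T) -> bool {
        #[cfg(not(target_os = "solana"))]
        {
            !_bytes.as_ref().is_empty() // host build: the binding is used
        }
        #[cfg(target_os = "solana")]
        {
            false // on-chain build: the parameter is intentionally unused
        }
    }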
8 changes: 5 additions & 3 deletions tokens/src/main.rs
@@ -31,10 +31,12 @@ fn main() -> Result<(), Box<dyn Error>> {
let client = RpcClient::new(json_rpc_url);

let exit = Arc::new(AtomicBool::default());
-let _exit = exit.clone();
// Initialize CTRL-C handler to ensure db changes are written before exit.
-ctrlc::set_handler(move || {
-_exit.store(true, Ordering::SeqCst);
+ctrlc::set_handler({
+let exit = exit.clone();
+move || {
+exit.store(true, Ordering::SeqCst);
+}
})
.expect("Error setting Ctrl-C handler");

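The tokens change shows the scoped-clone variant: instead of a separate _exit clone sitting in the enclosing scope, the clone is shadowed inside the block expression that builds the closure, so no underscore-prefixed binding exists at all. The same shape in isolation (a runnable, hypothetical sketch):

    use std::sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    };

    fn main() {
        let exit = Arc::new(AtomicBool::new(false));
        let handler = {
            let exit = Arc::clone(&exit); // shadowed clone, local to this block
            move || exit.store(true, Ordering::SeqCst)
        };
        handler(); // stand-in for the Ctrl-C signal firing
        assert!(exit.load(Ordering::SeqCst));
    }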