From d4ae6a36d62c5691c08aa4c005e887914ef02faf Mon Sep 17 00:00:00 2001
From: David Vernet
Date: Wed, 19 Jul 2023 19:21:56 -0500
Subject: [PATCH] scx: Fix some clippy warnings in Atropos

If you run clippy on atropos, it notices a few places where we can
clean up the rust code. Let's do that.

Note that we're skipping this suggestion, as it harms readability:

cargo clippy --manifest-path=scx_atropos/Cargo.toml --release
    Checking scx_atropos v0.5.0 (/home/void/upstream/sched_ext/tools/sched_ext/scx_atropos)
warning: manual implementation of `Option::map`
   --> src/main.rs:662:9
    |
662 | /         match tasks_by_load
663 | |             .into_iter()
664 | |             .skip_while(|(_, task)| {
665 | |                 task.migrated.get()
...   |
672 | |             None => None,
673 | |         }
    | |_________^
    |
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#manual_map
    = note: `#[warn(clippy::manual_map)]` on by default
help: try this
    |
662 ~         tasks_by_load
663 +             .into_iter()
664 +             .skip_while(|(_, task)| {
665 +                 task.migrated.get()
666 +                     || (task.dom_mask & (1 << pull_dom) == 0)
667 +                     || (skip_kworkers && task.is_kworker)
668 +             })
669 +             .next().map(|(OrderedFloat(load), task)| (*load, task))
    |
warning: called `skip_while(<p>).next()` on an `Iterator`
   --> src/main.rs:662:15
    |
662 |           match tasks_by_load
    |  _______________^
663 | |             .into_iter()
664 | |             .skip_while(|(_, task)| {
665 | |                 task.migrated.get()
...   |
668 | |             })
669 | |             .next()
    | |___________________^
    |
    = help: this is more succinctly expressed by calling `.find(!<p>)` instead
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#skip_while_next
    = note: `#[warn(clippy::skip_while_next)]` on by default
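
As a reference for why we're keeping the explicit match, here is a
minimal sketch of the two patterns these lints flag. The functions
below are made-up illustrations for this note, not atropos code:

  // manual_map: a match that only re-wraps the Some arm in Some can
  // be expressed with Option::map.
  fn double(x: Option<i32>) -> Option<i32> {
      match x {
          Some(v) => Some(v * 2),
          None => None,
      }
  }

  // The equivalent form clippy suggests:
  fn double_mapped(x: Option<i32>) -> Option<i32> {
      x.map(|v| v * 2)
  }

  // skip_while_next: skip_while(pred).next() yields the first element
  // for which pred is false, i.e. find() with the predicate negated.
  fn find_vs_skip_while() {
      let via_skip_while = (0..10).skip_while(|n| n % 2 == 0).next();
      let via_find = (0..10).find(|n| n % 2 != 0);
      assert_eq!(via_skip_while, via_find); // both are Some(1)
  }

Applied to the code above, both suggestions together would collapse
the match and the skip_while()/next() pair into a single find().map()
chain, which we think is harder to read than the explicit match.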

Signed-off-by: David Vernet
---
 tools/sched_ext/scx_atropos/build.rs    |  2 +-
 tools/sched_ext/scx_atropos/src/main.rs | 34 ++++++++++++-------------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/tools/sched_ext/scx_atropos/build.rs b/tools/sched_ext/scx_atropos/build.rs
index 26e792c5e17e91..bb56928ff0b599 100644
--- a/tools/sched_ext/scx_atropos/build.rs
+++ b/tools/sched_ext/scx_atropos/build.rs
@@ -50,7 +50,7 @@ fn gen_bpf_sched(name: &str) {
         .source(src.clone())
         .clang(clang)
         .clang_args(bpf_cflags)
-        .build_and_generate(&skel)
+        .build_and_generate(skel)
         .unwrap();
     println!("cargo:rerun-if-changed={}", src);
 }
diff --git a/tools/sched_ext/scx_atropos/src/main.rs b/tools/sched_ext/scx_atropos/src/main.rs
index aebbcd7b0bcee3..a411b6eeb9f1f9 100644
--- a/tools/sched_ext/scx_atropos/src/main.rs
+++ b/tools/sched_ext/scx_atropos/src/main.rs
@@ -159,11 +159,11 @@ fn format_cpumask(cpumask: &[u64], nr_cpus: usize) -> String {
 }

 fn read_total_cpu(reader: &procfs::ProcReader) -> Result<procfs::CpuStat> {
-    Ok(reader
+    reader
         .read_stat()
         .context("Failed to read procfs")?
         .total_cpu
-        .ok_or_else(|| anyhow!("Could not read total cpu stat in proc"))?)
+        .ok_or_else(|| anyhow!("Could not read total cpu stat in proc"))
 }

 fn calc_util(curr: &procfs::CpuStat, prev: &procfs::CpuStat) -> Result<f64> {
@@ -205,9 +205,9 @@ fn calc_util(curr: &procfs::CpuStat, prev: &procfs::CpuStat) -> Result<f64> {
                 user_usec + system_usec + nice_usec + irq_usec + softirq_usec + stolen_usec;
             let total_usec = idle_usec + busy_usec + iowait_usec;
             if total_usec > 0 {
-                return Ok(((busy_usec as f64) / (total_usec as f64)).clamp(0.0, 1.0));
+                Ok(((busy_usec as f64) / (total_usec as f64)).clamp(0.0, 1.0))
             } else {
-                return Ok(1.0);
+                Ok(1.0)
             }
         }
         _ => {
@@ -319,8 +319,8 @@ impl Topology {
             };

             cpu_to_cache.push(id);
-            if id.is_some() {
-                cache_ids.insert(id.unwrap());
+            if let Some(id) = id {
+                cache_ids.insert(id);
             }
         }

@@ -349,13 +349,13 @@ impl Topology {

         // Build and return dom -> cpumask and cpu -> dom mappings.
         let mut dom_cpus =
-            vec![bitvec![u64, Lsb0; 0; atropos_sys::MAX_CPUS as usize]; nr_doms as usize];
+            vec![bitvec![u64, Lsb0; 0; atropos_sys::MAX_CPUS as usize]; nr_doms];
         let mut cpu_dom = vec![];

-        for cpu in 0..nr_cpus {
-            match cpu_to_cache[cpu] {
+        for (cpu, cache) in cpu_to_cache.iter().enumerate().take(nr_cpus) {
+            match cache {
                 Some(cache_id) => {
-                    let dom_id = cache_to_dom[&cache_id];
+                    let dom_id = cache_to_dom[cache_id];
                     dom_cpus[dom_id].set(cpu, true);
                     cpu_dom.push(Some(dom_id));
                 }
@@ -868,7 +868,7 @@ impl<'a> Scheduler<'a> {
         }

         // Initialize skel according to @opts.
-        let top = Arc::new(if opts.cpumasks.len() > 0 {
+        let top = Arc::new(if !opts.cpumasks.is_empty() {
             Topology::from_cpumasks(&opts.cpumasks, nr_cpus)?
         } else {
             Topology::from_cache_level(opts.cache_level, nr_cpus)?
@@ -939,7 +939,7 @@ impl<'a> Scheduler<'a> {
     }

     fn get_cpu_busy(&mut self) -> Result<f64> {
-        let total_cpu = read_total_cpu(&mut self.proc_reader)?;
+        let total_cpu = read_total_cpu(&self.proc_reader)?;
         let busy = match (&self.prev_total_cpu, &total_cpu) {
             (
                 procfs::CpuStat {
@@ -998,7 +998,7 @@ impl<'a> Scheduler<'a> {

         for stat in 0..atropos_sys::stat_idx_ATROPOS_NR_STATS {
             let cpu_stat_vec = stats_map
-                .lookup_percpu(&(stat as u32).to_ne_bytes(), libbpf_rs::MapFlags::ANY)
+                .lookup_percpu(&stat.to_ne_bytes(), libbpf_rs::MapFlags::ANY)
                 .with_context(|| format!("Failed to lookup stat {}", stat))?
                 .expect("per-cpu stat should exist");
             let sum = cpu_stat_vec
@@ -1013,7 +1013,7 @@ impl<'a> Scheduler<'a> {
                 .sum();
             stats_map
                 .update_percpu(
-                    &(stat as u32).to_ne_bytes(),
+                    &stat.to_ne_bytes(),
                     &zero_vec,
                     libbpf_rs::MapFlags::ANY,
                 )
@@ -1025,12 +1025,12 @@ impl<'a> Scheduler<'a> {

     fn report(
         &mut self,
-        stats: &Vec<u64>,
+        stats: &[u64],
         cpu_busy: f64,
         processing_dur: Duration,
         load_avg: f64,
-        dom_loads: &Vec<f64>,
-        imbal: &Vec<f64>,
+        dom_loads: &[f64],
+        imbal: &[f64],
     ) {
         let stat = |idx| stats[idx as usize];
         let total = stat(atropos_sys::stat_idx_ATROPOS_STAT_WAKE_SYNC)