From 15e57f90555d94e1cbf2b1f9feb387b1f24dac05 Mon Sep 17 00:00:00 2001
From: Ariadne Conill
Date: Sat, 29 Jun 2024 00:12:43 -0700
Subject: [PATCH] feat(power-management-core): appease format checking

Signed-off-by: Ariadne Conill
---
 crates/ctl/src/cli/cpu_topology.rs | 19 +++++++---
 crates/ctl/src/cli/mod.rs          |  6 ++--
 crates/daemon/src/control.rs       | 34 ++++++++++++------
 crates/runtime/src/power.rs        | 57 ++++++++++++++++++++++--------
 4 files changed, 84 insertions(+), 32 deletions(-)

diff --git a/crates/ctl/src/cli/cpu_topology.rs b/crates/ctl/src/cli/cpu_topology.rs
index ca9eeed..3bc61fb 100644
--- a/crates/ctl/src/cli/cpu_topology.rs
+++ b/crates/ctl/src/cli/cpu_topology.rs
@@ -9,7 +9,7 @@ fn class_to_str(input: i32) -> String {
         0 => "Standard".to_string(),
         1 => "Performance".to_string(),
         2 => "Efficiency".to_string(),
-        _ => "???".to_string()
+        _ => "???".to_string(),
     }
 }
 
@@ -19,7 +19,10 @@ pub struct CpuTopologyCommand {}
 
 impl CpuTopologyCommand {
     pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
-        println!("{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}", "CPUID", "Node", "Socket", "Core", "Thread", "Class");
+        println!(
+            "{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
+            "CPUID", "Node", "Socket", "Core", "Thread", "Class"
+        );
 
         let response = client
             .get_host_cpu_topology(Request::new(HostCpuTopologyRequest {}))
@@ -28,10 +31,18 @@ impl CpuTopologyCommand {
 
         let mut i = 0;
         for cpu in response.cpus {
-            println!("{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}", i, cpu.node, cpu.socket, cpu.core, cpu.thread, class_to_str(cpu.class));
+            println!(
+                "{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
+                i,
+                cpu.node,
+                cpu.socket,
+                cpu.core,
+                cpu.thread,
+                class_to_str(cpu.class)
+            );
             i += 1;
         }
-
+
         Ok(())
     }
 }
diff --git a/crates/ctl/src/cli/mod.rs b/crates/ctl/src/cli/mod.rs
index 93d368d..43c7516 100644
--- a/crates/ctl/src/cli/mod.rs
+++ b/crates/ctl/src/cli/mod.rs
@@ -24,9 +24,9 @@ use krata::{
 use tonic::{transport::Channel, Request};
 
 use self::{
-    attach::AttachCommand, cpu_topology::CpuTopologyCommand, destroy::DestroyCommand, exec::ExecCommand,
-    identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand, launch::LaunchCommand,
-    list::ListCommand, list_devices::ListDevicesCommand, logs::LogsCommand,
+    attach::AttachCommand, cpu_topology::CpuTopologyCommand, destroy::DestroyCommand,
+    exec::ExecCommand, identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand,
+    launch::LaunchCommand, list::ListCommand, list_devices::ListDevicesCommand, logs::LogsCommand,
    metrics::MetricsCommand, pull::PullCommand, resolve::ResolveCommand, top::TopCommand,
     watch::WatchCommand,
 };
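An aside for readers of the ctl changes: the `{N:<10}` specifiers left-align each value in a ten-character column, so the wrapped `println!` calls emit exactly the same table as the single-line forms they replace. A minimal standalone sketch of the same formatting (the row values here are made up, and `.enumerate()` stands in for the manual counter in CpuTopologyCommand::run):

    // Each `{N:<10}` pads argument N to 10 columns, left-aligned, so the
    // header and rows line up however the println! call itself is wrapped.
    fn main() {
        println!(
            "{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
            "CPUID", "Node", "Socket", "Core", "Thread", "Class"
        );
        // enumerate() plays the role of `let mut i = 0; ... i += 1;` above.
        for (i, class) in ["Standard", "Performance", "Efficiency"].iter().enumerate() {
            println!(
                "{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
                i, 0, 0, i, 0, class
            );
        }
    }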
diff --git a/crates/daemon/src/control.rs b/crates/daemon/src/control.rs
index 579de1b..0af24b3 100644
--- a/crates/daemon/src/control.rs
+++ b/crates/daemon/src/control.rs
@@ -11,12 +11,12 @@ use krata::{
         control::{
             control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
             CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
-            DeviceInfo, ExecGuestReply, ExecGuestRequest, IdentifyHostReply, IdentifyHostRequest,
-            ListDevicesReply, ListDevicesRequest, ListGuestsReply, ListGuestsRequest,
-            PullImageReply, PullImageRequest, ReadGuestMetricsReply, ReadGuestMetricsRequest,
-            ResolveGuestReply, ResolveGuestRequest, SnoopIdmReply, SnoopIdmRequest,
-            WatchEventsReply, WatchEventsRequest, HostCpuTopologyRequest, HostCpuTopologyReply,
-            HostCpuTopologyInfo, HostPowerManagementPolicy,
+            DeviceInfo, ExecGuestReply, ExecGuestRequest, HostCpuTopologyInfo,
+            HostCpuTopologyReply, HostCpuTopologyRequest, HostPowerManagementPolicy,
+            IdentifyHostReply, IdentifyHostRequest, ListDevicesReply, ListDevicesRequest,
+            ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
+            ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
+            SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
         },
     },
 };
@@ -559,7 +559,11 @@ impl ControlService for DaemonControlService {
         request: Request<HostCpuTopologyRequest>,
     ) -> Result<Response<HostCpuTopologyReply>, Status> {
         let _ = request.into_inner();
-        let power = self.runtime.power_management_context().await.map_err(ApiError::from)?;
+        let power = self
+            .runtime
+            .power_management_context()
+            .await
+            .map_err(ApiError::from)?;
         let cputopo = power.cpu_topology().await.map_err(ApiError::from)?;
         let mut cpus = vec![];
 
@@ -581,11 +585,21 @@ impl ControlService for DaemonControlService {
         request: Request<HostPowerManagementPolicy>,
     ) -> Result<Response<HostPowerManagementPolicy>, Status> {
         let policy = request.into_inner();
-        let power = self.runtime.power_management_context().await.map_err(ApiError::from)?;
+        let power = self
+            .runtime
+            .power_management_context()
+            .await
+            .map_err(ApiError::from)?;
         let scheduler = &policy.scheduler;
 
-        power.set_smt_policy(policy.smt_awareness).await.map_err(ApiError::from)?;
-        power.set_scheduler_policy(scheduler).await.map_err(ApiError::from)?;
+        power
+            .set_smt_policy(policy.smt_awareness)
+            .await
+            .map_err(ApiError::from)?;
+        power
+            .set_scheduler_policy(scheduler)
+            .await
+            .map_err(ApiError::from)?;
 
         Ok(Response::new(HostPowerManagementPolicy {
             scheduler: scheduler.to_string(),
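A note on the `.map_err(ApiError::from)?` chains rustfmt is wrapping here: the daemon funnels internal errors through an ApiError wrapper that converts into tonic::Status, which is what lets `?` be used inside the RPC handlers. ApiError's actual definition lives in the daemon crate and is not shown in this diff; the sketch below assumes a minimal anyhow-backed shape just to show the pattern:

    use tonic::Status;

    // Assumed stand-in for the daemon's ApiError; the real type is not
    // part of this patch.
    struct ApiError(anyhow::Error);

    impl From<anyhow::Error> for ApiError {
        fn from(err: anyhow::Error) -> Self {
            ApiError(err)
        }
    }

    // Converting into Status is what makes `.map_err(ApiError::from)?`
    // legal in a function returning Result<_, Status>.
    impl From<ApiError> for Status {
        fn from(err: ApiError) -> Self {
            Status::internal(err.0.to_string())
        }
    }

    fn handler_body() -> Result<(), Status> {
        let fallible: Result<u32, anyhow::Error> = Ok(42);
        // `?` converts ApiError into Status via the From impl above.
        let _value = fallible.map_err(ApiError::from)?;
        Ok(())
    }

The handlers in the hunk above follow the same shape, with Response<T> in place of ().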
diff --git a/crates/runtime/src/power.rs b/crates/runtime/src/power.rs
index f5dec7f..3f92375 100644
--- a/crates/runtime/src/power.rs
+++ b/crates/runtime/src/power.rs
@@ -32,19 +32,24 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
 
     for item in input {
         if cores.is_empty() {
-            cores.insert((item.core, item.socket, item.node), vec![
-                CpuTopologyInfo {
+            cores.insert(
+                (item.core, item.socket, item.node),
+                vec![CpuTopologyInfo {
                     core: item.core,
                     socket: item.socket,
                     thread: 0,
                     node: item.node,
                     class: CpuClass::Standard,
-                }
-            ]);
+                }],
+            );
             continue;
         }
-
-        if last.map(|last| (item.core - last.core) >= 2).unwrap_or(false) { // detect if performance cores seem to be kicking in.
+
+        if last
+            .map(|last| (item.core - last.core) >= 2)
+            .unwrap_or(false)
+        {
+            // detect if performance cores seem to be kicking in.
             if let Some(last) = last {
                 if let Some(list) = cores.get_mut(&(last.core, last.socket, last.node)) {
                     for other in list {
@@ -52,7 +57,9 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
                     }
                 }
             }
-            let list = cores.entry((item.core, item.socket, item.node)).or_default();
+            let list = cores
+                .entry((item.core, item.socket, item.node))
+                .or_default();
             for old in &mut *list {
                 old.class = CpuClass::Performance;
             }
@@ -64,8 +71,11 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
                 class: CpuClass::Performance,
             });
             pe_cores = true;
-        } else if pe_cores && last.map(|last| item.core == last.core + 1).unwrap_or(false) { // detect efficiency cores if P/E cores are in use.
-            let list = cores.entry((item.core, item.socket, item.node)).or_default();
+        } else if pe_cores && last.map(|last| item.core == last.core + 1).unwrap_or(false) {
+            // detect efficiency cores if P/E cores are in use.
+            let list = cores
+                .entry((item.core, item.socket, item.node))
+                .or_default();
             list.push(CpuTopologyInfo {
                 core: item.core,
                 socket: item.socket,
@@ -74,7 +84,9 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
                 class: CpuClass::Efficiency,
             });
         } else {
-            let list = cores.entry((item.core, item.socket, item.node)).or_default();
+            let list = cores
+                .entry((item.core, item.socket, item.node))
+                .or_default();
             if list.is_empty() {
                 list.push(CpuTopologyInfo {
                     core: item.core,
@@ -89,7 +101,10 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
                     socket: item.socket,
                     thread: 0,
                     node: item.node,
-                    class: list.first().map(|first| first.class).unwrap_or(CpuClass::Standard),
+                    class: list
+                        .first()
+                        .map(|first| first.class)
+                        .unwrap_or(CpuClass::Standard),
                 });
             }
         }
@@ -101,8 +116,12 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
             thread.thread = index as u32;
         }
     }
-
-    cores.into_values().into_iter().flatten().collect::<Vec<_>>()
+
+    cores
+        .into_values()
+        .into_iter()
+        .flatten()
+        .collect::<Vec<_>>()
 }
 
 impl PowerManagementContext {
@@ -119,13 +138,21 @@ impl PowerManagementContext {
 
     /// Enable or disable SMT awareness in the scheduler.
     pub async fn set_smt_policy(&self, enable: bool) -> Result<()> {
-        self.context.xen.call.set_turbo_mode(CpuId::All, enable).await?;
+        self.context
+            .xen
+            .call
+            .set_turbo_mode(CpuId::All, enable)
+            .await?;
         Ok(())
     }
 
     /// Set scheduler policy name.
     pub async fn set_scheduler_policy(&self, policy: impl AsRef<str>) -> Result<()> {
-        self.context.xen.call.set_cpufreq_gov(CpuId::All, policy).await?;
+        self.context
+            .xen
+            .call
+            .set_cpufreq_gov(CpuId::All, policy)
+            .await?;
         Ok(())
     }
 }
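For context on what the reflowed power.rs code is doing, here is a loose, self-contained restatement of its core-classification heuristic: within an ascending scan of core ids, a jump of two or more is read as SMT-capable performance cores kicking in, after which consecutive ids are read as single-thread efficiency cores. This sketch ignores the socket/node grouping and thread numbering that `labelled_topo` tracks, and the core ids in `main` are hypothetical:

    #[derive(Debug, Clone, Copy)]
    enum CpuClass {
        Standard,
        Performance,
        Efficiency,
    }

    fn classify(core_ids: &[u32]) -> Vec<CpuClass> {
        let mut classes = Vec::with_capacity(core_ids.len());
        let mut pe_cores = false;
        let mut last: Option<u32> = None;
        for &core in core_ids {
            let class = match last {
                // Core id jumped by 2 or more: performance cores kicking in.
                Some(prev) if core >= prev + 2 => {
                    pe_cores = true;
                    // labelled_topo also retroactively marks the previous
                    // core's threads as performance; mirror that here.
                    if let Some(prev_class) = classes.last_mut() {
                        *prev_class = CpuClass::Performance;
                    }
                    CpuClass::Performance
                }
                // In P/E mode, a +1 step marks an efficiency core.
                Some(prev) if pe_cores && core == prev + 1 => CpuClass::Efficiency,
                _ if pe_cores => CpuClass::Performance,
                _ => CpuClass::Standard,
            };
            classes.push(class);
            last = Some(core);
        }
        classes
    }

    fn main() {
        // Uniform layout: every core stays Standard.
        println!("{:?}", classify(&[0, 1, 2, 3]));
        // Gapped ids then consecutive ids: P cores, then E cores.
        println!("{:?}", classify(&[0, 2, 4, 5, 6]));
    }

The real function keys its map on (core, socket, node) and renumbers threads within each group afterwards, but the gap-based detection above is the part the inline comments in the diff refer to.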