feat(power-management-core): appease format checking

Signed-off-by: Ariadne Conill <ariadne@ariadne.space>
This commit is contained in:
Ariadne Conill 2024-06-29 00:12:43 -07:00
parent 82b2b976b4
commit 15e57f9055
4 changed files with 84 additions and 32 deletions

View File

@@ -9,7 +9,7 @@ fn class_to_str(input: i32) -> String {
0 => "Standard".to_string(), 0 => "Standard".to_string(),
1 => "Performance".to_string(), 1 => "Performance".to_string(),
2 => "Efficiency".to_string(), 2 => "Efficiency".to_string(),
_ => "???".to_string() _ => "???".to_string(),
} }
} }
@@ -19,7 +19,10 @@ pub struct CpuTopologyCommand {}
impl CpuTopologyCommand { impl CpuTopologyCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> { pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
println!("{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}", "CPUID", "Node", "Socket", "Core", "Thread", "Class"); println!(
"{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
"CPUID", "Node", "Socket", "Core", "Thread", "Class"
);
let response = client let response = client
.get_host_cpu_topology(Request::new(HostCpuTopologyRequest {})) .get_host_cpu_topology(Request::new(HostCpuTopologyRequest {}))
@@ -28,10 +31,18 @@ impl CpuTopologyCommand {
let mut i = 0; let mut i = 0;
for cpu in response.cpus { for cpu in response.cpus {
println!("{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}", i, cpu.node, cpu.socket, cpu.core, cpu.thread, class_to_str(cpu.class)); println!(
"{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
i,
cpu.node,
cpu.socket,
cpu.core,
cpu.thread,
class_to_str(cpu.class)
);
i += 1; i += 1;
} }
Ok(()) Ok(())
} }
} }

View File

@@ -24,9 +24,9 @@ use krata::{
use tonic::{transport::Channel, Request}; use tonic::{transport::Channel, Request};
use self::{ use self::{
attach::AttachCommand, cpu_topology::CpuTopologyCommand, destroy::DestroyCommand, exec::ExecCommand, attach::AttachCommand, cpu_topology::CpuTopologyCommand, destroy::DestroyCommand,
identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand, launch::LaunchCommand, exec::ExecCommand, identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand,
list::ListCommand, list_devices::ListDevicesCommand, logs::LogsCommand, launch::LaunchCommand, list::ListCommand, list_devices::ListDevicesCommand, logs::LogsCommand,
metrics::MetricsCommand, pull::PullCommand, resolve::ResolveCommand, top::TopCommand, metrics::MetricsCommand, pull::PullCommand, resolve::ResolveCommand, top::TopCommand,
watch::WatchCommand, watch::WatchCommand,
}; };

View File

@@ -11,12 +11,12 @@ use krata::{
control::{ control::{
control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest, control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest, CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
DeviceInfo, ExecGuestReply, ExecGuestRequest, IdentifyHostReply, IdentifyHostRequest, DeviceInfo, ExecGuestReply, ExecGuestRequest, HostCpuTopologyInfo,
ListDevicesReply, ListDevicesRequest, ListGuestsReply, ListGuestsRequest, HostCpuTopologyReply, HostCpuTopologyRequest, HostPowerManagementPolicy,
PullImageReply, PullImageRequest, ReadGuestMetricsReply, ReadGuestMetricsRequest, IdentifyHostReply, IdentifyHostRequest, ListDevicesReply, ListDevicesRequest,
ResolveGuestReply, ResolveGuestRequest, SnoopIdmReply, SnoopIdmRequest, ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
WatchEventsReply, WatchEventsRequest, HostCpuTopologyRequest, HostCpuTopologyReply, ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
HostCpuTopologyInfo, HostPowerManagementPolicy, SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
}, },
}, },
}; };
@@ -559,7 +559,11 @@ impl ControlService for DaemonControlService {
request: Request<HostCpuTopologyRequest>, request: Request<HostCpuTopologyRequest>,
) -> Result<Response<HostCpuTopologyReply>, Status> { ) -> Result<Response<HostCpuTopologyReply>, Status> {
let _ = request.into_inner(); let _ = request.into_inner();
let power = self.runtime.power_management_context().await.map_err(ApiError::from)?; let power = self
.runtime
.power_management_context()
.await
.map_err(ApiError::from)?;
let cputopo = power.cpu_topology().await.map_err(ApiError::from)?; let cputopo = power.cpu_topology().await.map_err(ApiError::from)?;
let mut cpus = vec![]; let mut cpus = vec![];
@@ -581,11 +585,21 @@ impl ControlService for DaemonControlService {
request: Request<HostPowerManagementPolicy>, request: Request<HostPowerManagementPolicy>,
) -> Result<Response<HostPowerManagementPolicy>, Status> { ) -> Result<Response<HostPowerManagementPolicy>, Status> {
let policy = request.into_inner(); let policy = request.into_inner();
let power = self.runtime.power_management_context().await.map_err(ApiError::from)?; let power = self
.runtime
.power_management_context()
.await
.map_err(ApiError::from)?;
let scheduler = &policy.scheduler; let scheduler = &policy.scheduler;
power.set_smt_policy(policy.smt_awareness).await.map_err(ApiError::from)?; power
power.set_scheduler_policy(scheduler).await.map_err(ApiError::from)?; .set_smt_policy(policy.smt_awareness)
.await
.map_err(ApiError::from)?;
power
.set_scheduler_policy(scheduler)
.await
.map_err(ApiError::from)?;
Ok(Response::new(HostPowerManagementPolicy { Ok(Response::new(HostPowerManagementPolicy {
scheduler: scheduler.to_string(), scheduler: scheduler.to_string(),

View File

@@ -32,19 +32,24 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
for item in input { for item in input {
if cores.is_empty() { if cores.is_empty() {
cores.insert((item.core, item.socket, item.node), vec![ cores.insert(
CpuTopologyInfo { (item.core, item.socket, item.node),
vec![CpuTopologyInfo {
core: item.core, core: item.core,
socket: item.socket, socket: item.socket,
thread: 0, thread: 0,
node: item.node, node: item.node,
class: CpuClass::Standard, class: CpuClass::Standard,
} }],
]); );
continue; continue;
} }
if last.map(|last| (item.core - last.core) >= 2).unwrap_or(false) { // detect if performance cores seem to be kicking in. if last
.map(|last| (item.core - last.core) >= 2)
.unwrap_or(false)
{
// detect if performance cores seem to be kicking in.
if let Some(last) = last { if let Some(last) = last {
if let Some(list) = cores.get_mut(&(last.core, last.socket, last.node)) { if let Some(list) = cores.get_mut(&(last.core, last.socket, last.node)) {
for other in list { for other in list {
@@ -52,7 +57,9 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
} }
} }
} }
let list = cores.entry((item.core, item.socket, item.node)).or_default(); let list = cores
.entry((item.core, item.socket, item.node))
.or_default();
for old in &mut *list { for old in &mut *list {
old.class = CpuClass::Performance; old.class = CpuClass::Performance;
} }
@@ -64,8 +71,11 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
class: CpuClass::Performance, class: CpuClass::Performance,
}); });
pe_cores = true; pe_cores = true;
} else if pe_cores && last.map(|last| item.core == last.core + 1).unwrap_or(false) { // detect efficiency cores if P/E cores are in use. } else if pe_cores && last.map(|last| item.core == last.core + 1).unwrap_or(false) {
let list = cores.entry((item.core, item.socket, item.node)).or_default(); // detect efficiency cores if P/E cores are in use.
let list = cores
.entry((item.core, item.socket, item.node))
.or_default();
list.push(CpuTopologyInfo { list.push(CpuTopologyInfo {
core: item.core, core: item.core,
socket: item.socket, socket: item.socket,
@@ -74,7 +84,9 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
class: CpuClass::Efficiency, class: CpuClass::Efficiency,
}); });
} else { } else {
let list = cores.entry((item.core, item.socket, item.node)).or_default(); let list = cores
.entry((item.core, item.socket, item.node))
.or_default();
if list.is_empty() { if list.is_empty() {
list.push(CpuTopologyInfo { list.push(CpuTopologyInfo {
core: item.core, core: item.core,
@@ -89,7 +101,10 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
socket: item.socket, socket: item.socket,
thread: 0, thread: 0,
node: item.node, node: item.node,
class: list.first().map(|first| first.class).unwrap_or(CpuClass::Standard), class: list
.first()
.map(|first| first.class)
.unwrap_or(CpuClass::Standard),
}); });
} }
} }
@@ -101,8 +116,12 @@ fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
thread.thread = index as u32; thread.thread = index as u32;
} }
} }
cores.into_values().into_iter().flatten().collect::<Vec<_>>() cores
.into_values()
.into_iter()
.flatten()
.collect::<Vec<_>>()
} }
impl PowerManagementContext { impl PowerManagementContext {
@@ -119,13 +138,21 @@ impl PowerManagementContext {
/// Enable or disable SMT awareness in the scheduler. /// Enable or disable SMT awareness in the scheduler.
pub async fn set_smt_policy(&self, enable: bool) -> Result<()> { pub async fn set_smt_policy(&self, enable: bool) -> Result<()> {
self.context.xen.call.set_turbo_mode(CpuId::All, enable).await?; self.context
.xen
.call
.set_turbo_mode(CpuId::All, enable)
.await?;
Ok(()) Ok(())
} }
/// Set scheduler policy name. /// Set scheduler policy name.
pub async fn set_scheduler_policy(&self, policy: impl AsRef<str>) -> Result<()> { pub async fn set_scheduler_policy(&self, policy: impl AsRef<str>) -> Result<()> {
self.context.xen.call.set_cpufreq_gov(CpuId::All, policy).await?; self.context
.xen
.call
.set_cpufreq_gov(CpuId::All, policy)
.await?;
Ok(()) Ok(())
} }
} }