mirror of https://github.com/edera-dev/krata.git
synced 2025-08-03 05:10:55 +00:00

chore(code): simple code cleanup

commit 2a4802a75d
parent 2a107a370f

@@ -51,7 +51,7 @@ impl ControlCommand {
             ControlCommands::Device(device) => device.run(client, events).await,
-            ControlCommands::Host(snoop) => snoop.run(client, events).await,
+            ControlCommands::Host(host) => host.run(client, events).await,
         }
     }
 }

@@ -106,7 +106,7 @@ impl ZoneTopApp {
                         break;
                     }
                 }
-            };
+            }
         }
         Ok(())
     }

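Note: most hunks in this commit are the same mechanical cleanup, dropping a redundant trailing semicolon after a block expression (typically a `select!` inside a polling loop) used as a statement. A minimal sketch of that shape, using a hypothetical tokio channel/timer loop rather than krata's actual code:

```rust
use std::time::Duration;
use tokio::{select, sync::mpsc, time::sleep};

// Hypothetical polling loop: a `select!` block used as a statement needs no
// trailing semicolon, so the closing `};` becomes `}`.
async fn pump(mut rx: mpsc::Receiver<u32>) {
    loop {
        select! {
            item = rx.recv() => {
                if item.is_none() {
                    break;
                }
            }
            _ = sleep(Duration::from_secs(10)) => {
                break;
            }
        } // previously written as `};`
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(8);
    drop(tx); // closing the sender makes recv() yield None, so the loop exits
    pump(rx).await;
}
```
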
@@ -265,7 +265,7 @@ impl ControlService for DaemonControlService {
                             break;
                         }
                     }
-                };
+                }
             }
         };

@@ -205,7 +205,7 @@ impl DaemonIdm {
                         break;
                     }
                 }
-            };
+            }
         }
         Ok(())
     }

@@ -132,7 +132,7 @@ impl ZoneReconciler {
                             error!("runtime reconciler failed: {}", error);
                         }
                     }
-                };
+                }
             }
         }))
     }

@@ -34,7 +34,7 @@ use super::{

 type OneshotRequestMap<R> = Arc<Mutex<HashMap<u64, oneshot::Sender<<R as IdmRequest>::Response>>>>;
 type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
-type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, mpsc::Sender<R>>>>;
+type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, Sender<R>>>>;
 pub type IdmInternalClient = IdmClient<internal::Request, internal::Event>;

 const IDM_PACKET_QUEUE_LEN: usize = 100;

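The alias hunk above only shortens `mpsc::Sender<R>` to `Sender<R>`, matching the neighboring `StreamRequestMap` alias; that relies on `Sender` already being imported from `tokio::sync::mpsc` in that module (assumed here). A self-contained sketch of the same shape, with a hypothetical `IdmRequest` stand-in trait:

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{mpsc::Sender, Mutex};

// Hypothetical stand-in for krata's IdmRequest trait; only the associated
// Response type matters for the alias shapes shown in the hunk.
#[allow(dead_code)]
trait IdmRequest {
    type Response;
}

// With `Sender` imported directly, `mpsc::Sender<R>` and `Sender<R>` name the
// same type, so the shorter spelling keeps the aliases consistent.
#[allow(dead_code)]
type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
#[allow(dead_code)]
type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, Sender<R>>>>;

fn main() {}
```
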
@@ -187,7 +187,7 @@ impl AutoNetworkWatcher {
                 _ = sleep(Duration::from_secs(10)) => {
                     break;
                 }
-            };
+            }
         }
         Ok(())
     }

@@ -25,7 +25,7 @@ async fn main() -> Result<()> {
     let (context, mut receiver) = OciProgressContext::create();
     tokio::task::spawn(async move {
         loop {
-            if (receiver.changed().await).is_err() {
+            if receiver.changed().await.is_err() {
                 break;
             }
             let progress = receiver.borrow_and_update();

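The change above drops redundant parentheses: `.await` is postfix, so `receiver.changed().await.is_err()` already parses the intended way. A minimal sketch of the same watch-channel polling pattern, with a plain counter standing in for the OCI progress value:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(0u32);

    let task = tokio::spawn(async move {
        loop {
            // `.await` binds as a postfix operator, so the extra parentheses
            // around `rx.changed().await` were redundant.
            if rx.changed().await.is_err() {
                break;
            }
            let value = *rx.borrow_and_update();
            println!("progress update: {value}");
        }
    });

    tx.send(1).unwrap();
    drop(tx); // dropping the sender closes the channel and ends the loop
    task.await.unwrap();
}
```
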
@@ -97,13 +97,13 @@ impl OciPackerBackend for OciPackerMkSquashfs {
                     status = &mut wait => {
                         break status;
                     }
-                };
+                }
             } else {
                 select! {
                     status = &mut wait => {
                         break status;
                     }
-                };
+                }
             }
         };
         if let Some(writer) = writer {

@@ -172,13 +172,13 @@ impl OciPackerBackend for OciPackerMkfsErofs {
                     status = &mut wait => {
                         break status;
                     }
-                };
+                }
             } else {
                 select! {
                     status = &mut wait => {
                         break status;
                     }
-                };
+                }
             }
         };
         if let Some(writer) = writer {

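Both packer hunks above fix the same trailing semicolon after a `select!` that waits on the external packing tool. A rough sketch of that wait-loop shape, with `true` standing in for mksquashfs/mkfs.erofs and a timer in place of the real input-feeding branch (assumes a Unix-like system with `true` on PATH):

```rust
use std::time::Duration;
use tokio::{process::Command, select, time::sleep};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `true` is a placeholder for the real packing tool.
    let mut child = Command::new("true").spawn()?;
    let mut wait = Box::pin(child.wait());

    let status = loop {
        select! {
            _ = sleep(Duration::from_millis(100)) => {
                // The real backend feeds data to the tool in this branch.
                continue;
            }
            status = &mut wait => {
                break status?;
            }
        } // the trailing `;` removed by this commit was redundant here as well
    };

    println!("packer exited with {status}");
    Ok(())
}
```
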
@@ -228,7 +228,7 @@ impl OciBoundProgress {
         context.update(&progress);
         let mut receiver = self.context.subscribe();
         tokio::task::spawn(async move {
-            while (receiver.changed().await).is_ok() {
+            while receiver.changed().await.is_ok() {
                 context
                     .sender
                     .send_replace(receiver.borrow_and_update().clone());

@@ -503,7 +503,7 @@ impl KrataChannelBackendProcessor {
                         break;
                     }
                 }
-            };
+            }
         }
         Ok(())
     }

@@ -130,8 +130,7 @@ impl<P: BootSetupPlatform> XenClient<P> {
         match self.init(created.domid, config, &created).await {
             Ok(_) => Ok(created),
             Err(err) => {
-                // ignore since destroying a domain is best
-                // effort when an error occurs
+                // ignore since destroying a domain is best-effort when an error occurs
                 let _ = self.domain_manager.destroy(created.domid).await;
                 Err(err)
             }

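The hunk above only re-wraps a comment onto one line, but the surrounding idiom is the point of the comment: `let _ =` deliberately discards the destroy result so a failed best-effort cleanup cannot mask the original error. A generic sketch of that pattern with hypothetical `setup`/`init`/`teardown` functions (not krata's API):

```rust
fn setup() -> Result<u32, String> {
    Ok(1)
}

fn init(_id: u32) -> Result<(), String> {
    Err("boot failed".into())
}

fn teardown(_id: u32) -> Result<(), String> {
    Ok(())
}

fn create() -> Result<u32, String> {
    let id = setup()?;
    match init(id) {
        Ok(()) => Ok(id),
        Err(err) => {
            // ignore since tearing down is best-effort when an error occurs
            let _ = teardown(id);
            Err(err)
        }
    }
}

fn main() {
    println!("{:?}", create());
}
```
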
@@ -185,9 +185,9 @@ impl EventChannelProcessor {
                 if self.flag.load(Ordering::Acquire) {
                     break;
                 }
-                warn!("failed to process event channel notifications: {}", error);
+            };
             }
-        });
+
         Ok(())
     }

@@ -298,7 +298,7 @@ impl XsdSocketProcessor {
                         break;
                     }
                 }
-            };
+            }
         }
         Ok(())
     }

@@ -95,7 +95,7 @@ impl ZoneBackground {
                         break;
                     }
                 }
-            };
+            }
         }
         Ok(())
     }
