krata: work on parallel reconciliation

Alex Zenla 2024-04-02 00:56:18 +00:00
parent 6a2f1e6517
commit 8dd3cc7692
No known key found for this signature in database
GPG Key ID: 067B238899B51269
27 changed files with 582 additions and 428 deletions

Cargo.lock generated
View File

@ -391,21 +391,24 @@ version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
[[package]]
name = "cli-tables"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50023707b9ed841a0d2b92e5d3fe650a15ab42b24f3e7da4e1d7f25b8ddf2357"
dependencies = [
"terminal_size",
]
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "comfy-table"
version = "7.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c64043d6c7b7a4c58e39e7efccfdea7b93d885a795d0c054a69dbbf4dd52686"
dependencies = [
"crossterm",
"strum",
"strum_macros",
"unicode-width",
]
[[package]]
name = "cpufeatures"
version = "0.2.12"
@ -1169,17 +1172,6 @@ dependencies = [
"hashbrown 0.14.3",
]
[[package]]
name = "io-lifetimes"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
"hermit-abi",
"libc",
"windows-sys 0.48.0",
]
[[package]]
name = "ipnet"
version = "2.9.0"
@ -1278,7 +1270,7 @@ dependencies = [
"anyhow",
"async-stream",
"clap",
"cli-tables",
"comfy-table",
"crossterm",
"ctrlc",
"env_logger",
@ -1435,6 +1427,7 @@ dependencies = [
"log",
"nix 0.28.0",
"thiserror",
"tokio",
"uuid",
]
@ -1442,6 +1435,7 @@ dependencies = [
name = "krata-xenclient"
version = "0.0.3"
dependencies = [
"async-trait",
"elf",
"env_logger",
"flate2",
@ -1450,6 +1444,7 @@ dependencies = [
"libc",
"log",
"memchr",
"nix 0.28.0",
"slice-copy",
"thiserror",
"tokio",
@ -1517,12 +1512,6 @@ dependencies = [
"windows-targets 0.52.4",
]
[[package]]
name = "linux-raw-sys"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "linux-raw-sys"
version = "0.4.13"
@ -2233,20 +2222,6 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "rustix"
version = "0.37.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2"
dependencies = [
"bitflags 1.3.2",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys 0.3.8",
"windows-sys 0.48.0",
]
[[package]]
name = "rustix"
version = "0.38.32"
@ -2256,7 +2231,7 @@ dependencies = [
"bitflags 2.5.0",
"errno",
"libc",
"linux-raw-sys 0.4.13",
"linux-raw-sys",
"windows-sys 0.52.0",
]
@ -2545,6 +2520,25 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01"
[[package]]
name = "strum"
version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
[[package]]
name = "strum_macros"
version = "0.25.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.57",
]
[[package]]
name = "subtle"
version = "2.5.0"
@ -2606,20 +2600,10 @@ checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1"
dependencies = [
"cfg-if",
"fastrand",
"rustix 0.38.32",
"rustix",
"windows-sys 0.52.0",
]
[[package]]
name = "terminal_size"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
dependencies = [
"rustix 0.37.27",
"windows-sys 0.48.0",
]
[[package]]
name = "thiserror"
version = "1.0.58"
@ -2911,6 +2895,12 @@ dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-width"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
[[package]]
name = "unsafe-libyaml"
version = "0.2.11"
@ -3074,7 +3064,7 @@ dependencies = [
"either",
"home",
"once_cell",
"rustix 0.38.32",
"rustix",
]
[[package]]
@ -3275,8 +3265,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f"
dependencies = [
"libc",
"linux-raw-sys 0.4.13",
"rustix 0.38.32",
"linux-raw-sys",
"rustix",
]
[[package]]

View File

@ -31,7 +31,7 @@ backhand = "0.15.0"
byteorder = "1"
bytes = "1.5.0"
cgroups-rs = "0.3.4"
cli-tables = "0.2.1"
comfy-table = "7.1.0"
crossterm = "0.27.0"
ctrlc = "3.4.4"
elf = "0.7.4"

View File

@ -12,7 +12,7 @@ resolver = "2"
anyhow = { workspace = true }
async-stream = { workspace = true }
clap = { workspace = true }
cli-tables = { workspace = true }
comfy-table = { workspace = true }
crossterm = { workspace = true }
ctrlc = { workspace = true, features = ["termination"] }
env_logger = { workspace = true }

View File

@ -1,10 +1,10 @@
use anyhow::{anyhow, Result};
use clap::{Parser, ValueEnum};
use cli_tables::Table;
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
use krata::{
events::EventStream,
v1::{
common::{guest_image_spec::Image, Guest},
common::{Guest, GuestStatus},
control::{
control_service_client::ControlServiceClient, ListGuestsRequest, ResolveGuestRequest,
},
@ -14,11 +14,11 @@ use krata::{
use serde_json::Value;
use tonic::{transport::Channel, Request};
use crate::format::{guest_simple_line, guest_state_text, kv2line, proto2dynamic, proto2kv};
use crate::format::{guest_simple_line, guest_status_text, kv2line, proto2dynamic, proto2kv};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum ListFormat {
CliTable,
Table,
Json,
JsonPretty,
Jsonl,
@ -29,7 +29,7 @@ enum ListFormat {
#[derive(Parser)]
pub struct ListCommand {
#[arg(short, long, default_value = "cli-table")]
#[arg(short, long, default_value = "table")]
format: ListFormat,
#[arg()]
guest: Option<String>,
@ -70,7 +70,7 @@ impl ListCommand {
});
match self.format {
ListFormat::CliTable => {
ListFormat::Table => {
self.print_guest_table(guests)?;
}
@ -114,49 +114,51 @@ impl ListCommand {
fn print_guest_table(&self, guests: Vec<Guest>) -> Result<()> {
let mut table = Table::new();
let header = vec!["name", "uuid", "state", "ipv4", "ipv6", "image"];
table.push_row(&header)?;
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
table.set_header(vec!["name", "uuid", "status", "ipv4", "ipv6"]);
for guest in guests {
let ipv4 = guest
.state
.as_ref()
.and_then(|x| x.network.as_ref())
.map(|x| x.guest_ipv4.as_str())
.unwrap_or("unknown");
.unwrap_or("n/a");
let ipv6 = guest
.state
.as_ref()
.and_then(|x| x.network.as_ref())
.map(|x| x.guest_ipv6.as_str())
.unwrap_or("unknown");
.unwrap_or("n/a");
let Some(spec) = guest.spec else {
continue;
};
let image = spec
.image
.map(|x| {
x.image
.map(|y| match y {
Image::Oci(oci) => oci.image,
})
.unwrap_or("unknown".to_string())
})
.unwrap_or("unknown".to_string());
table.push_row_string(&vec![
spec.name,
guest.id,
format!("{}", guest_state_text(guest.state.as_ref())),
ipv4.to_string(),
ipv6.to_string(),
image,
])?;
let status = guest.state.as_ref().cloned().unwrap_or_default().status();
let status_text = guest_status_text(status);
let status_color = match status {
GuestStatus::Destroyed | GuestStatus::Failed => Color::Red,
GuestStatus::Destroying | GuestStatus::Exited | GuestStatus::Starting => {
Color::Yellow
}
if table.num_records() == 1 {
GuestStatus::Started => Color::Green,
_ => Color::Reset,
};
table.add_row(vec![
Cell::new(spec.name),
Cell::new(guest.id),
Cell::new(status_text).fg(status_color),
Cell::new(ipv4.to_string()),
Cell::new(ipv6.to_string()),
]);
}
if table.is_empty() {
if self.guest.is_none() {
println!("no guests have been launched");
}
} else {
println!("{}", table.to_string());
println!("{}", table);
}
Ok(())
}
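
The list command switches from cli-tables to comfy-table, coloring only the status cell by guest state. A minimal, self-contained sketch of that comfy-table pattern, assuming comfy-table 7.x; the rows data is illustrative and not part of krata:

use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, ContentArrangement, Table};

fn main() {
    let mut table = Table::new();
    table.load_preset(UTF8_FULL_CONDENSED);
    table.set_content_arrangement(ContentArrangement::Dynamic);
    table.set_header(vec!["name", "status"]);
    // Illustrative rows; only the status cell gets a foreground color.
    let rows = vec![("alpha", "started", Color::Green), ("beta", "failed", Color::Red)];
    for (name, status, color) in rows {
        table.add_row(vec![Cell::new(name), Cell::new(status).fg(color)]);
    }
    if table.is_empty() {
        println!("no rows");
    } else {
        println!("{}", table);
    }
}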

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use anyhow::Result;
use krata::v1::common::{Guest, GuestState, GuestStatus};
use krata::v1::common::{Guest, GuestStatus};
use prost_reflect::{DynamicMessage, ReflectMessage, Value};
pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
@ -71,20 +71,6 @@ pub fn guest_status_text(status: GuestStatus) -> String {
.to_string()
}
pub fn guest_state_text(state: Option<&GuestState>) -> String {
let state = state.cloned().unwrap_or_default();
let mut text = guest_status_text(state.status());
if let Some(exit) = state.exit_info {
text.push_str(&format!(" (exit code: {})", exit.code));
}
if let Some(error) = state.error_info {
text.push_str(&format!(" (error: {})", error.message));
}
text
}
pub fn guest_simple_line(guest: &Guest) -> String {
let state = guest_status_text(
guest

View File

@ -49,8 +49,12 @@ impl Daemon {
DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
.await?;
let runtime_for_reconciler = runtime.dupe().await?;
let guest_reconciler =
GuestReconciler::new(guests.clone(), events.clone(), runtime_for_reconciler)?;
let guest_reconciler = GuestReconciler::new(
guests.clone(),
events.clone(),
runtime_for_reconciler,
guest_reconciler_notify.clone(),
)?;
let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
let generator_task = generator.launch().await?;

View File

@ -18,7 +18,7 @@ use tokio::{
select,
sync::{
mpsc::{channel, Receiver, Sender},
Mutex, Semaphore,
Mutex, RwLock,
},
task::JoinHandle,
time::sleep,
@ -30,6 +30,14 @@ use crate::{
event::{DaemonEvent, DaemonEventContext},
};
const PARALLEL_LIMIT: u32 = 5;
#[derive(Debug)]
enum GuestReconcilerResult {
Unchanged,
Changed { rerun: bool },
}
struct GuestReconcilerEntry {
task: JoinHandle<()>,
sender: Sender<()>,
@ -46,18 +54,25 @@ pub struct GuestReconciler {
guests: GuestStore,
events: DaemonEventContext,
runtime: Runtime,
limiter: Arc<Semaphore>,
tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
guest_reconciler_notify: Sender<Uuid>,
reconcile_lock: Arc<RwLock<()>>,
}
impl GuestReconciler {
pub fn new(guests: GuestStore, events: DaemonEventContext, runtime: Runtime) -> Result<Self> {
pub fn new(
guests: GuestStore,
events: DaemonEventContext,
runtime: Runtime,
guest_reconciler_notify: Sender<Uuid>,
) -> Result<Self> {
Ok(Self {
guests,
events,
runtime,
limiter: Arc::new(Semaphore::new(10)),
tasks: Arc::new(Mutex::new(HashMap::new())),
guest_reconciler_notify,
reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
})
}
@ -99,6 +114,7 @@ impl GuestReconciler {
}
pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
let _permit = self.reconcile_lock.write().await;
trace!("reconciling runtime");
let runtime_guests = self.runtime.list().await?;
let stored_guests = self.guests.list().await?;
@ -131,22 +147,20 @@ impl GuestReconciler {
if changed || initial {
self.guests.update(uuid, stored_guest).await?;
if let Err(error) = self.reconcile(uuid).await {
error!("failed to reconcile guest {}: {}", uuid, error);
}
let _ = self.guest_reconciler_notify.try_send(uuid);
}
}
Ok(())
}
pub async fn reconcile(&self, uuid: Uuid) -> Result<()> {
let _permit = self.limiter.acquire().await?;
pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
let _runtime_reconcile_permit = self.reconcile_lock.read().await;
let Some(mut guest) = self.guests.read(uuid).await? else {
warn!(
"notified of reconcile for guest {} but it didn't exist",
uuid
);
return Ok(());
return Ok(false);
};
info!("reconciling guest {}", uuid);
@ -156,14 +170,16 @@ impl GuestReconciler {
guest: Some(guest.clone()),
}))?;
let result = match guest.state.as_ref().map(|x| x.status()).unwrap_or_default() {
let start_status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
let result = match start_status {
GuestStatus::Starting => self.start(uuid, &mut guest).await,
GuestStatus::Destroying | GuestStatus::Exited => self.destroy(uuid, &mut guest).await,
_ => Ok(false),
GuestStatus::Exited => self.exited(&mut guest).await,
GuestStatus::Destroying => self.destroy(uuid, &mut guest).await,
_ => Ok(GuestReconcilerResult::Unchanged),
};
let changed = match result {
Ok(changed) => changed,
let result = match result {
Ok(result) => result,
Err(error) => {
guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
guest.state.as_mut().unwrap().status = GuestStatus::Failed.into();
@ -171,16 +187,16 @@ impl GuestReconciler {
message: error.to_string(),
});
warn!("failed to start guest {}: {}", guest.id, error);
true
GuestReconcilerResult::Changed { rerun: false }
}
};
info!("reconciled guest {}", uuid);
let status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
let destroyed = status == GuestStatus::Destroyed || status == GuestStatus::Failed;
let destroyed = status == GuestStatus::Destroyed;
if changed {
let rerun = if let GuestReconcilerResult::Changed { rerun } = result {
let event = DaemonEvent::GuestChanged(GuestChangedEvent {
guest: Some(guest.clone()),
});
@ -194,12 +210,15 @@ impl GuestReconciler {
}
self.events.send(event)?;
rerun
} else {
false
};
Ok(rerun)
}
Ok(())
}
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<bool> {
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
let Some(ref spec) = guest.spec else {
return Err(anyhow!("guest spec not specified"));
};
@ -245,10 +264,19 @@ impl GuestReconciler {
error_info: None,
domid: info.domid,
});
Ok(true)
Ok(GuestReconcilerResult::Changed { rerun: false })
}
async fn destroy(&self, uuid: Uuid, guest: &mut Guest) -> Result<bool> {
async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
if let Some(ref mut state) = guest.state {
state.set_status(GuestStatus::Destroying);
Ok(GuestReconcilerResult::Changed { rerun: true })
} else {
Ok(GuestReconcilerResult::Unchanged)
}
}
async fn destroy(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
if let Err(error) = self.runtime.destroy(uuid).await {
trace!("failed to destroy runtime guest {}: {}", uuid, error);
}
@ -261,7 +289,7 @@ impl GuestReconciler {
error_info: None,
domid: guest.state.as_ref().map(|x| x.domid).unwrap_or(u32::MAX),
});
Ok(true)
Ok(GuestReconcilerResult::Changed { rerun: false })
}
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
@ -279,9 +307,24 @@ impl GuestReconciler {
let this = self.clone();
let (sender, mut receiver) = channel(10);
let task = tokio::task::spawn(async move {
while receiver.recv().await.is_some() {
if let Err(error) = this.reconcile(uuid).await {
'notify_loop: loop {
if receiver.recv().await.is_none() {
break 'notify_loop;
}
'rerun_loop: loop {
let rerun = match this.reconcile(uuid).await {
Ok(rerun) => rerun,
Err(error) => {
error!("failed to reconcile guest {}: {}", uuid, error);
false
}
};
if rerun {
continue 'rerun_loop;
}
break 'rerun_loop;
}
}
});
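
The reconciler now bounds parallelism with a tokio RwLock capped at PARALLEL_LIMIT readers: each per-guest reconcile holds a read permit, while reconcile_runtime takes the write lock and runs exclusively; the per-guest task loop also reruns itself when a reconcile reports rerun. A hedged, standalone sketch of the locking scheme only, assuming tokio with the rt and sync features; the placeholder comments stand in for the real reconcile logic:

use std::sync::Arc;
use tokio::sync::RwLock;

const PARALLEL_LIMIT: u32 = 5;

#[tokio::main]
async fn main() {
    // The unit value only carries the lock; read permits are capped at PARALLEL_LIMIT.
    let reconcile_lock = Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT));

    // Exclusive phase, like reconcile_runtime: no per-guest reconcile can run concurrently.
    {
        let _all = reconcile_lock.write().await;
        // scan runtime and stored guests here
    }

    // Bounded-parallel phase, like per-guest reconcile: at most PARALLEL_LIMIT run at once.
    let mut tasks = Vec::new();
    for guest in 0..10u32 {
        let lock = reconcile_lock.clone();
        tasks.push(tokio::spawn(async move {
            let _permit = lock.read().await;
            // reconcile a single guest here
            let _ = guest;
        }));
    }
    for task in tasks {
        let _ = task.await;
    }
}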

View File

@ -5,6 +5,7 @@ use oci_spec::image::{ImageConfiguration, ImageManifest};
use std::path::{Path, PathBuf};
use tokio::fs;
#[derive(Clone)]
pub struct ImageCache {
cache_dir: PathBuf,
}

View File

@ -1,17 +1,25 @@
use std::{sync::Arc, time::Duration};
use anyhow::{anyhow, Result};
use log::debug;
use loopdev::{LoopControl, LoopDevice};
use tokio::time::sleep;
use xenclient::BlockDeviceRef;
#[derive(Clone)]
pub struct AutoLoop {
control: LoopControl,
control: Arc<LoopControl>,
}
impl AutoLoop {
pub fn new(control: LoopControl) -> AutoLoop {
AutoLoop { control }
AutoLoop {
control: Arc::new(control),
}
}
pub fn loopify(&self, file: &str) -> Result<BlockDeviceRef> {
debug!("creating loop for file {}", file);
let device = self.control.next_free()?;
device.with().read_only(true).attach(file)?;
let path = device
@ -25,9 +33,10 @@ impl AutoLoop {
Ok(BlockDeviceRef { path, major, minor })
}
pub fn unloop(&self, device: &str) -> Result<()> {
pub async fn unloop(&self, device: &str) -> Result<()> {
let device = LoopDevice::open(device)?;
device.detach()?;
sleep(Duration::from_millis(200)).await;
Ok(())
}
}
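
AutoLoop wraps LoopControl in an Arc so the struct can derive Clone, and unloop becomes async with a short settle delay after detaching. A rough sketch of that shape, assuming tokio and anyhow, with the loopdev calls replaced by a placeholder; Control and AutoDetach are illustrative names:

use std::sync::Arc;
use std::time::Duration;
use tokio::time::sleep;

// Stand-in for a non-Clone resource handle such as loopdev::LoopControl.
struct Control;

#[derive(Clone)]
struct AutoDetach {
    control: Arc<Control>,
}

impl AutoDetach {
    fn new(control: Control) -> Self {
        AutoDetach {
            control: Arc::new(control),
        }
    }

    async fn detach(&self, device: &str) -> anyhow::Result<()> {
        // the real code opens the loop device and calls detach() here
        let _ = (&self.control, device);
        // give the kernel a moment to release the device before it is reused
        sleep(Duration::from_millis(200)).await;
        Ok(())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let auto = AutoDetach::new(Control);
    let cloned = auto.clone();
    cloned.detach("/dev/loop0").await
}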

View File

@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::net::{IpAddr, Ipv6Addr};
use std::sync::Arc;
use std::{fs, net::Ipv4Addr, str::FromStr};
use advmac::MacAddr6;
@ -8,6 +9,7 @@ use ipnetwork::{IpNetwork, Ipv4Network};
use krata::launchcfg::{
LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
};
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
use xenstore::XsdInterface;
@ -33,16 +35,18 @@ pub struct GuestLaunchRequest<'a> {
pub debug: bool,
}
pub struct GuestLauncher {}
pub struct GuestLauncher {
pub launch_semaphore: Arc<Semaphore>,
}
impl GuestLauncher {
pub fn new() -> Result<Self> {
Ok(Self {})
pub fn new(launch_semaphore: Arc<Semaphore>) -> Result<Self> {
Ok(Self { launch_semaphore })
}
pub async fn launch<'r>(
&mut self,
context: &mut RuntimeContext,
context: &RuntimeContext,
request: GuestLaunchRequest<'r>,
) -> Result<GuestInfo> {
let uuid = request.uuid.unwrap_or_else(Uuid::new_v4);
@ -56,6 +60,7 @@ impl GuestLauncher {
container_mac.set_local(true);
container_mac.set_multicast(false);
let _launch_permit = self.launch_semaphore.acquire().await?;
let guest_ipv4 = self.allocate_ipv4(context).await?;
let guest_ipv6 = container_mac.to_link_local_ipv6();
let gateway_ipv4 = "10.75.70.1";
@ -223,14 +228,14 @@ impl GuestLauncher {
)?),
gateway_ipv6: Some(IpNetwork::new(
IpAddr::V6(Ipv6Addr::from_str(gateway_ipv6)?),
ipv4_network_mask as u8,
ipv6_network_mask as u8,
)?),
gateway_mac: Some(gateway_mac_string.clone()),
state: GuestState { exit_code: None },
}),
Err(error) => {
let _ = context.autoloop.unloop(&image_squashfs_loop.path);
let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path);
let _ = context.autoloop.unloop(&image_squashfs_loop.path).await;
let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path).await;
let _ = fs::remove_dir(&cfgblk.dir);
Err(error.into())
}
@ -243,7 +248,7 @@ impl GuestLauncher {
compiler.compile(&image).await
}
async fn allocate_ipv4(&mut self, context: &mut RuntimeContext) -> Result<Ipv4Addr> {
async fn allocate_ipv4(&self, context: &RuntimeContext) -> Result<Ipv4Addr> {
let network = Ipv4Network::new(Ipv4Addr::new(10, 75, 80, 0), 24)?;
let mut used: Vec<Ipv4Addr> = vec![];
for domid_candidate in context.xen.store.list("/local/domain").await? {
@ -270,7 +275,7 @@ impl GuestLauncher {
if found.is_none() {
return Err(anyhow!(
"unable to find ipv4 to allocate to container, ipv4 addresses are exhausted"
"unable to find ipv4 to allocate to guest, ipv4 addresses are exhausted"
));
}

View File

@ -8,7 +8,7 @@ use std::{
use anyhow::{anyhow, Result};
use ipnetwork::IpNetwork;
use loopdev::LoopControl;
use tokio::sync::Mutex;
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::XenClient;
use xenstore::{XsdClient, XsdInterface};
@ -51,6 +51,7 @@ pub struct GuestInfo {
pub state: GuestState,
}
#[derive(Clone)]
pub struct RuntimeContext {
pub image_cache: ImageCache,
pub autoloop: AutoLoop,
@ -94,7 +95,7 @@ impl RuntimeContext {
Err(anyhow!("unable to find required guest file: {}", name))
}
pub async fn list(&mut self) -> Result<Vec<GuestInfo>> {
pub async fn list(&self) -> Result<Vec<GuestInfo>> {
let mut guests: Vec<GuestInfo> = Vec::new();
for domid_candidate in self.xen.store.list("/local/domain").await? {
if domid_candidate == "0" {
@ -218,7 +219,7 @@ impl RuntimeContext {
Ok(guests)
}
pub async fn resolve(&mut self, uuid: Uuid) -> Result<Option<GuestInfo>> {
pub async fn resolve(&self, uuid: Uuid) -> Result<Option<GuestInfo>> {
for guest in self.list().await? {
if guest.uuid == uuid {
return Ok(Some(guest));
@ -254,7 +255,8 @@ impl RuntimeContext {
#[derive(Clone)]
pub struct Runtime {
store: Arc<String>,
context: Arc<Mutex<RuntimeContext>>,
context: RuntimeContext,
launch_semaphore: Arc<Semaphore>,
}
impl Runtime {
@ -262,24 +264,24 @@ impl Runtime {
let context = RuntimeContext::new(store.clone()).await?;
Ok(Self {
store: Arc::new(store),
context: Arc::new(Mutex::new(context)),
context,
launch_semaphore: Arc::new(Semaphore::new(1)),
})
}
pub async fn launch<'a>(&self, request: GuestLaunchRequest<'a>) -> Result<GuestInfo> {
let mut context = self.context.lock().await;
let mut launcher = GuestLauncher::new()?;
launcher.launch(&mut context, request).await
let mut launcher = GuestLauncher::new(self.launch_semaphore.clone())?;
launcher.launch(&self.context, request).await
}
pub async fn destroy(&self, uuid: Uuid) -> Result<Uuid> {
let mut context = self.context.lock().await;
let info = context
let info = self
.context
.resolve(uuid)
.await?
.ok_or_else(|| anyhow!("unable to resolve guest: {}", uuid))?;
let domid = info.domid;
let mut store = XsdClient::open().await?;
let store = XsdClient::open().await?;
let dom_path = store.get_domain_path(domid).await?;
let uuid = match store
.read_string(format!("{}/krata/uuid", dom_path).as_str())
@ -301,9 +303,9 @@ impl Runtime {
.read_string(format!("{}/krata/loops", dom_path).as_str())
.await?;
let loops = RuntimeContext::parse_loop_set(&loops);
context.xen.destroy(domid).await?;
self.context.xen.destroy(domid).await?;
for info in &loops {
context.autoloop.unloop(&info.device)?;
self.context.autoloop.unloop(&info.device).await?;
match &info.delete {
None => {}
Some(delete) => {
@ -320,19 +322,18 @@ impl Runtime {
}
pub async fn console(&self, uuid: Uuid) -> Result<XenConsole> {
let mut context = self.context.lock().await;
let info = context
let info = self
.context
.resolve(uuid)
.await?
.ok_or_else(|| anyhow!("unable to resolve guest: {}", uuid))?;
let domid = info.domid;
let tty = context.xen.get_console_path(domid).await?;
let tty = self.context.xen.get_console_path(domid).await?;
XenConsole::new(&tty).await
}
pub async fn list(&self) -> Result<Vec<GuestInfo>> {
let mut context = self.context.lock().await;
context.list().await
self.context.list().await
}
pub async fn dupe(&self) -> Result<Runtime> {

View File

@ -13,6 +13,7 @@ libc = { workspace = true }
log = { workspace = true }
nix = { workspace = true, features = ["ioctl"] }
thiserror = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }
[lib]

View File

@ -2,11 +2,12 @@ use xencall::error::Result;
use xencall::sys::CreateDomain;
use xencall::XenCall;
fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();
let call = XenCall::open(0)?;
let domid = call.create_domain(CreateDomain::default())?;
let domid = call.create_domain(CreateDomain::default()).await?;
println!("created domain {}", domid);
Ok(())
}

View File

@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;
fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();
let call = XenCall::open(0)?;
let info = call.get_domain_info(1)?;
let info = call.get_domain_info(1).await?;
println!("{:?}", info);
Ok(())
}

View File

@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;
fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();
let call = XenCall::open(0)?;
let context = call.get_vcpu_context(224, 0)?;
let context = call.get_vcpu_context(224, 0).await?;
println!("{:?}", context);
Ok(())
}

View File

@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;
fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();
let call = XenCall::open(0)?;
let info = call.get_version_capabilities()?;
let info = call.get_version_capabilities().await?;
println!("{:?}", info);
Ok(())
}

View File

@ -8,8 +8,12 @@ pub enum Error {
Kernel(#[from] nix::errno::Errno),
#[error("io issue encountered: {0}")]
Io(#[from] io::Error),
#[error("failed to acquire semaphore: {0}")]
AcquireSemaphoreFailed(#[from] tokio::sync::AcquireError),
#[error("populate physmap failed")]
PopulatePhysmapFailed,
#[error("mmap batch failed: {0}")]
MmapBatchFailed(nix::errno::Errno),
}
pub type Result<T> = std::result::Result<T, Error>;

View File

@ -18,15 +18,19 @@ use libc::{c_int, mmap, usleep, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use log::trace;
use nix::errno::Errno;
use std::ffi::{c_long, c_uint, c_ulong, c_void};
use std::sync::Arc;
use sys::{XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION};
use tokio::sync::Semaphore;
use std::fs::{File, OpenOptions};
use std::os::fd::AsRawFd;
use std::ptr::addr_of_mut;
use std::slice;
#[derive(Clone)]
pub struct XenCall {
pub handle: File,
pub handle: Arc<File>,
semaphore: Arc<Semaphore>,
domctl_interface_version: u32,
}
@ -39,7 +43,8 @@ impl XenCall {
let domctl_interface_version =
XenCall::detect_domctl_interface_version(&handle, current_domid)?;
Ok(XenCall {
handle,
handle: Arc::new(handle),
semaphore: Arc::new(Semaphore::new(1)),
domctl_interface_version,
})
}
@ -68,7 +73,8 @@ impl XenCall {
Err(Error::XenVersionUnsupported)
}
pub fn mmap(&self, addr: u64, len: u64) -> Option<u64> {
pub async fn mmap(&self, addr: u64, len: u64) -> Option<u64> {
let _permit = self.semaphore.acquire().await.ok()?;
trace!(
"call fd={} mmap addr={:#x} len={}",
self.handle.as_raw_fd(),
@ -99,7 +105,8 @@ impl XenCall {
}
}
pub fn hypercall(&self, op: c_ulong, arg: [c_ulong; 5]) -> Result<c_long> {
pub async fn hypercall(&self, op: c_ulong, arg: [c_ulong; 5]) -> Result<c_long> {
let _permit = self.semaphore.acquire().await?;
trace!(
"call fd={} hypercall op={:#x} arg={:?}",
self.handle.as_raw_fd(),
@ -113,29 +120,29 @@ impl XenCall {
}
}
pub fn hypercall0(&self, op: c_ulong) -> Result<c_long> {
self.hypercall(op, [0, 0, 0, 0, 0])
pub async fn hypercall0(&self, op: c_ulong) -> Result<c_long> {
self.hypercall(op, [0, 0, 0, 0, 0]).await
}
pub fn hypercall1(&self, op: c_ulong, arg1: c_ulong) -> Result<c_long> {
self.hypercall(op, [arg1, 0, 0, 0, 0])
pub async fn hypercall1(&self, op: c_ulong, arg1: c_ulong) -> Result<c_long> {
self.hypercall(op, [arg1, 0, 0, 0, 0]).await
}
pub fn hypercall2(&self, op: c_ulong, arg1: c_ulong, arg2: c_ulong) -> Result<c_long> {
self.hypercall(op, [arg1, arg2, 0, 0, 0])
pub async fn hypercall2(&self, op: c_ulong, arg1: c_ulong, arg2: c_ulong) -> Result<c_long> {
self.hypercall(op, [arg1, arg2, 0, 0, 0]).await
}
pub fn hypercall3(
pub async fn hypercall3(
&self,
op: c_ulong,
arg1: c_ulong,
arg2: c_ulong,
arg3: c_ulong,
) -> Result<c_long> {
self.hypercall(op, [arg1, arg2, arg3, 0, 0])
self.hypercall(op, [arg1, arg2, arg3, 0, 0]).await
}
pub fn hypercall4(
pub async fn hypercall4(
&self,
op: c_ulong,
arg1: c_ulong,
@ -143,10 +150,10 @@ impl XenCall {
arg3: c_ulong,
arg4: c_ulong,
) -> Result<c_long> {
self.hypercall(op, [arg1, arg2, arg3, arg4, 0])
self.hypercall(op, [arg1, arg2, arg3, arg4, 0]).await
}
pub fn hypercall5(
pub async fn hypercall5(
&self,
op: c_ulong,
arg1: c_ulong,
@ -155,10 +162,10 @@ impl XenCall {
arg4: c_ulong,
arg5: c_ulong,
) -> Result<c_long> {
self.hypercall(op, [arg1, arg2, arg3, arg4, arg5])
self.hypercall(op, [arg1, arg2, arg3, arg4, arg5]).await
}
pub fn multicall(&self, calls: &mut [MultiCallEntry]) -> Result<()> {
pub async fn multicall(&self, calls: &mut [MultiCallEntry]) -> Result<()> {
trace!(
"call fd={} multicall calls={:?}",
self.handle.as_raw_fd(),
@ -168,11 +175,12 @@ impl XenCall {
HYPERVISOR_MULTICALL,
calls.as_mut_ptr() as c_ulong,
calls.len() as c_ulong,
)?;
)
.await?;
Ok(())
}
pub fn map_resource(
pub async fn map_resource(
&self,
domid: u32,
typ: u32,
@ -181,6 +189,7 @@ impl XenCall {
num: u64,
addr: u64,
) -> Result<()> {
let _permit = self.semaphore.acquire().await?;
let mut resource = MmapResource {
dom: domid as u16,
typ,
@ -195,7 +204,14 @@ impl XenCall {
Ok(())
}
pub fn mmap_batch(&self, domid: u32, num: u64, addr: u64, mfns: Vec<u64>) -> Result<c_long> {
pub async fn mmap_batch(
&self,
domid: u32,
num: u64,
addr: u64,
mfns: Vec<u64>,
) -> Result<c_long> {
let _permit = self.semaphore.acquire().await?;
trace!(
"call fd={} mmap_batch domid={} num={} addr={:#x} mfns={:?}",
self.handle.as_raw_fd(),
@ -218,7 +234,7 @@ impl XenCall {
let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
if let Err(errno) = result {
if errno != Errno::ENOENT {
return Err(errno)?;
return Err(Error::MmapBatchFailed(errno))?;
}
usleep(100);
@ -253,7 +269,7 @@ impl XenCall {
let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
if let Err(n) = result {
if n != Errno::ENOENT {
return Err(n)?;
return Err(Error::MmapBatchFailed(n))?;
}
}
@ -273,7 +289,7 @@ impl XenCall {
}
}
pub fn get_version_capabilities(&self) -> Result<XenCapabilitiesInfo> {
pub async fn get_version_capabilities(&self) -> Result<XenCapabilitiesInfo> {
trace!(
"call fd={} get_version_capabilities",
self.handle.as_raw_fd()
@ -285,26 +301,29 @@ impl XenCall {
HYPERVISOR_XEN_VERSION,
XENVER_CAPABILITIES,
addr_of_mut!(info) as c_ulong,
)?;
)
.await?;
Ok(info)
}
pub fn evtchn_op(&self, cmd: c_int, arg: u64) -> Result<()> {
self.hypercall2(HYPERVISOR_EVENT_CHANNEL_OP, cmd as c_ulong, arg)?;
pub async fn evtchn_op(&self, cmd: c_int, arg: u64) -> Result<()> {
self.hypercall2(HYPERVISOR_EVENT_CHANNEL_OP, cmd as c_ulong, arg)
.await?;
Ok(())
}
pub fn evtchn_alloc_unbound(&self, domid: u32, remote_domid: u32) -> Result<u32> {
pub async fn evtchn_alloc_unbound(&self, domid: u32, remote_domid: u32) -> Result<u32> {
let mut alloc_unbound = EvtChnAllocUnbound {
dom: domid as u16,
remote_dom: remote_domid as u16,
port: 0,
};
self.evtchn_op(6, addr_of_mut!(alloc_unbound) as c_ulong)?;
self.evtchn_op(6, addr_of_mut!(alloc_unbound) as c_ulong)
.await?;
Ok(alloc_unbound.port)
}
pub fn get_domain_info(&self, domid: u32) -> Result<GetDomainInfo> {
pub async fn get_domain_info(&self, domid: u32) -> Result<GetDomainInfo> {
trace!(
"domctl fd={} get_domain_info domid={}",
self.handle.as_raw_fd(),
@ -318,11 +337,12 @@ impl XenCall {
get_domain_info: GetDomainInfo::default(),
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { domctl.value.get_domain_info })
}
pub fn create_domain(&self, create_domain: CreateDomain) -> Result<u32> {
pub async fn create_domain(&self, create_domain: CreateDomain) -> Result<u32> {
trace!(
"domctl fd={} create_domain create_domain={:?}",
self.handle.as_raw_fd(),
@ -334,11 +354,12 @@ impl XenCall {
domid: 0,
value: DomCtlValue { create_domain },
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(domctl.domid)
}
pub fn pause_domain(&self, domid: u32) -> Result<()> {
pub async fn pause_domain(&self, domid: u32) -> Result<()> {
trace!(
"domctl fd={} pause_domain domid={:?}",
self.handle.as_raw_fd(),
@ -350,11 +371,12 @@ impl XenCall {
domid,
value: DomCtlValue { pad: [0; 128] },
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn unpause_domain(&self, domid: u32) -> Result<()> {
pub async fn unpause_domain(&self, domid: u32) -> Result<()> {
trace!(
"domctl fd={} unpause_domain domid={:?}",
self.handle.as_raw_fd(),
@ -366,11 +388,12 @@ impl XenCall {
domid,
value: DomCtlValue { pad: [0; 128] },
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn set_max_mem(&self, domid: u32, memkb: u64) -> Result<()> {
pub async fn set_max_mem(&self, domid: u32, memkb: u64) -> Result<()> {
trace!(
"domctl fd={} set_max_mem domid={} memkb={}",
self.handle.as_raw_fd(),
@ -385,11 +408,12 @@ impl XenCall {
max_mem: MaxMem { max_memkb: memkb },
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn set_max_vcpus(&self, domid: u32, max_vcpus: u32) -> Result<()> {
pub async fn set_max_vcpus(&self, domid: u32, max_vcpus: u32) -> Result<()> {
trace!(
"domctl fd={} set_max_vcpus domid={} max_vcpus={}",
self.handle.as_raw_fd(),
@ -404,11 +428,12 @@ impl XenCall {
max_cpus: MaxVcpus { max_vcpus },
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn set_address_size(&self, domid: u32, size: u32) -> Result<()> {
pub async fn set_address_size(&self, domid: u32, size: u32) -> Result<()> {
trace!(
"domctl fd={} set_address_size domid={} size={}",
self.handle.as_raw_fd(),
@ -423,11 +448,12 @@ impl XenCall {
address_size: AddressSize { size },
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
pub async fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
trace!(
"domctl fd={} get_vcpu_context domid={}",
self.handle.as_raw_fd(),
@ -447,11 +473,12 @@ impl XenCall {
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { wrapper.value })
}
pub fn set_vcpu_context(
pub async fn set_vcpu_context(
&self,
domid: u32,
vcpu: u32,
@ -476,11 +503,12 @@ impl XenCall {
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn get_page_frame_info(&self, domid: u32, frames: &[u64]) -> Result<Vec<u64>> {
pub async fn get_page_frame_info(&self, domid: u32, frames: &[u64]) -> Result<Vec<u64>> {
let mut buffer: Vec<u64> = frames.to_vec();
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_GETPAGEFRAMEINFO3,
@ -493,7 +521,8 @@ impl XenCall {
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
let slice = unsafe {
slice::from_raw_parts_mut(
domctl.value.get_page_frame_info.array as *mut u64,
@ -503,7 +532,7 @@ impl XenCall {
Ok(slice.to_vec())
}
pub fn hypercall_init(&self, domid: u32, gmfn: u64) -> Result<()> {
pub async fn hypercall_init(&self, domid: u32, gmfn: u64) -> Result<()> {
trace!(
"domctl fd={} hypercall_init domid={} gmfn={}",
self.handle.as_raw_fd(),
@ -518,11 +547,12 @@ impl XenCall {
hypercall_init: HypercallInit { gmfn },
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn destroy_domain(&self, domid: u32) -> Result<()> {
pub async fn destroy_domain(&self, domid: u32) -> Result<()> {
trace!(
"domctl fd={} destroy_domain domid={}",
self.handle.as_raw_fd(),
@ -534,11 +564,12 @@ impl XenCall {
domid,
value: DomCtlValue { pad: [0; 128] },
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub fn get_memory_map(&self, size_of_entry: usize) -> Result<Vec<u8>> {
pub async fn get_memory_map(&self, size_of_entry: usize) -> Result<Vec<u8>> {
let mut memory_map = MemoryMap {
count: 0,
buffer: 0,
@ -547,18 +578,20 @@ impl XenCall {
HYPERVISOR_MEMORY_OP,
XEN_MEM_MEMORY_MAP as c_ulong,
addr_of_mut!(memory_map) as c_ulong,
)?;
)
.await?;
let mut buffer = vec![0u8; memory_map.count as usize * size_of_entry];
memory_map.buffer = buffer.as_mut_ptr() as c_ulong;
self.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_MEMORY_MAP as c_ulong,
addr_of_mut!(memory_map) as c_ulong,
)?;
)
.await?;
Ok(buffer)
}
pub fn populate_physmap(
pub async fn populate_physmap(
&self,
domid: u32,
nr_extents: u64,
@ -590,7 +623,7 @@ impl XenCall {
0,
],
}];
self.multicall(calls)?;
self.multicall(calls).await?;
let code = calls[0].result;
if code > !0xfff {
return Err(Error::PopulatePhysmapFailed);
@ -602,7 +635,7 @@ impl XenCall {
Ok(extents)
}
pub fn claim_pages(&self, domid: u32, pages: u64) -> Result<()> {
pub async fn claim_pages(&self, domid: u32, pages: u64) -> Result<()> {
trace!(
"memory fd={} claim_pages domid={} pages={}",
self.handle.as_raw_fd(),
@ -620,11 +653,12 @@ impl XenCall {
HYPERVISOR_MEMORY_OP,
XEN_MEM_CLAIM_PAGES as c_ulong,
addr_of_mut!(reservation) as c_ulong,
)?;
)
.await?;
Ok(())
}
pub fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
pub async fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
let mut ops = MmuExtOp { cmd, arg1, arg2 };
self.hypercall4(
@ -634,6 +668,7 @@ impl XenCall {
0,
domid as c_ulong,
)
.await
.map(|_| ())
}
}
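
XenCall becomes cloneable and its hypercall surface async: the privcmd file handle moves behind an Arc and a one-permit semaphore serializes the underlying ioctls across clones. A rough sketch of that wrapper shape, assuming tokio; Handle is an illustrative name and the actual ioctl is replaced by a placeholder comment:

use std::fs::File;
use std::sync::Arc;
use tokio::sync::Semaphore;

#[derive(Clone)]
struct Handle {
    file: Arc<File>,
    semaphore: Arc<Semaphore>,
}

impl Handle {
    fn open(path: &str) -> std::io::Result<Self> {
        Ok(Handle {
            file: Arc::new(File::open(path)?),
            semaphore: Arc::new(Semaphore::new(1)),
        })
    }

    async fn call(&self, op: u64) -> Result<i64, tokio::sync::AcquireError> {
        // Only one hypercall-style ioctl runs at a time, even though Handle is freely cloned.
        let _permit = self.semaphore.acquire().await?;
        // the real code issues the ioctl against self.file here
        let _ = (&self.file, op);
        Ok(0)
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let handle = Handle::open("/dev/null")?;
    let cloned = handle.clone();
    let (a, b) = tokio::join!(handle.call(1), cloned.call(2));
    println!("{:?} {:?}", a, b);
    Ok(())
}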

View File

@ -9,6 +9,7 @@ edition = "2021"
resolver = "2"
[dependencies]
async-trait = { workspace = true }
elf = { workspace = true }
flate2 = { workspace = true }
libc = { workspace = true }
@ -16,6 +17,7 @@ log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.3" }
krata-xenstore = { path = "../xenstore", version = "^0.0.3" }
memchr = { workspace = true }
nix = { workspace = true }
slice-copy = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
@ -24,6 +26,7 @@ xz2 = { workspace = true }
[dev-dependencies]
env_logger = { workspace = true }
tokio = { workspace = true }
[lib]
name = "xenclient"

View File

@ -13,7 +13,7 @@ async fn main() -> Result<()> {
}
let kernel_image_path = args.get(1).expect("argument not specified");
let initrd_path = args.get(2).expect("argument not specified");
let mut client = XenClient::open(0).await?;
let client = XenClient::open(0).await?;
let config = DomainConfig {
backend_domid: 0,
name: "xenclient-test",

View File

@ -41,9 +41,9 @@ impl Arm64BootSetup {
Arm64BootSetup {}
}
fn populate_one_size(
&mut self,
setup: &mut BootSetup,
async fn populate_one_size(
&self,
setup: &mut BootSetup<'_>,
pfn_shift: u64,
base_pfn: u64,
pfn_count: u64,
@ -78,20 +78,23 @@ impl Arm64BootSetup {
extents[i as usize] = base_pfn + (i << pfn_shift);
}
let result_extents = setup.call.populate_physmap(
let result_extents = setup
.call
.populate_physmap(
setup.domid,
count,
pfn_shift as u32,
0,
&extents[0usize..count as usize],
)?;
)
.await?;
slice_copy::copy(extents, &result_extents);
Ok((result_extents.len() as u64) << pfn_shift)
}
fn populate_guest_memory(
async fn populate_guest_memory(
&mut self,
setup: &mut BootSetup,
setup: &mut BootSetup<'_>,
base_pfn: u64,
pfn_count: u64,
) -> Result<()> {
@ -99,43 +102,51 @@ impl Arm64BootSetup {
for pfn in 0..extents.len() {
let mut allocsz = (1024 * 1024).min(pfn_count - pfn as u64);
allocsz = self.populate_one_size(
allocsz = self
.populate_one_size(
setup,
PFN_512G_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)?;
)
.await?;
if allocsz > 0 {
continue;
}
allocsz = self.populate_one_size(
allocsz = self
.populate_one_size(
setup,
PFN_1G_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)?;
)
.await?;
if allocsz > 0 {
continue;
}
allocsz = self.populate_one_size(
allocsz = self
.populate_one_size(
setup,
PFN_2M_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)?;
)
.await?;
if allocsz > 0 {
continue;
}
allocsz = self.populate_one_size(
allocsz = self
.populate_one_size(
setup,
PFN_4K_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)?;
)
.await?;
if allocsz == 0 {
return Err(Error::MemorySetupFailed("allocsz is zero"));
}
@ -145,6 +156,7 @@ impl Arm64BootSetup {
}
}
#[async_trait::async_trait]
impl ArchBootSetup for Arm64BootSetup {
fn page_size(&mut self) -> u64 {
ARM_PAGE_SIZE
@ -158,15 +170,15 @@ impl ArchBootSetup for Arm64BootSetup {
true
}
fn setup_shared_info(&mut self, _: &mut BootSetup, _: u64) -> Result<()> {
async fn setup_shared_info(&mut self, _: &mut BootSetup, _: u64) -> Result<()> {
Ok(())
}
fn setup_start_info(&mut self, _: &mut BootSetup, _: &BootState, _: &str) -> Result<()> {
async fn setup_start_info(&mut self, _: &mut BootSetup, _: &BootState, _: &str) -> Result<()> {
Ok(())
}
fn meminit(
async fn meminit(
&mut self,
setup: &mut BootSetup,
total_pages: u64,
@ -176,7 +188,7 @@ impl ArchBootSetup for Arm64BootSetup {
let kernel_segment = kernel_segment
.as_ref()
.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
setup.call.claim_pages(setup.domid, total_pages)?;
setup.call.claim_pages(setup.domid, total_pages).await?;
let mut ramsize = total_pages << XEN_PAGE_SHIFT;
let bankbase = GUEST_RAM_BANK_BASES;
@ -214,7 +226,8 @@ impl ArchBootSetup for Arm64BootSetup {
}
for i in 0..2 {
self.populate_guest_memory(setup, bankbase[i] >> XEN_PAGE_SHIFT, rambank_size[i])?;
self.populate_guest_memory(setup, bankbase[i] >> XEN_PAGE_SHIFT, rambank_size[i])
.await?;
}
let bank0end = bankbase[0] + (rambank_size[0] << XEN_PAGE_SHIFT);
@ -227,15 +240,15 @@ impl ArchBootSetup for Arm64BootSetup {
} else {
return Err(Error::MemorySetupFailed("unable to determine modbase"));
};
setup.call.claim_pages(setup.domid, 0)?;
setup.call.claim_pages(setup.domid, 0).await?;
Ok(())
}
fn bootlate(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
async fn bootlate(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
Ok(())
}
fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
let mut vcpu = VcpuGuestContext::default();
vcpu.user_regs.pc = state.image_info.virt_entry;
vcpu.user_regs.x0 = 0xffffffff;
@ -249,11 +262,11 @@ impl ArchBootSetup for Arm64BootSetup {
vcpu.user_regs.cpsr = PSR_GUEST64_INIT;
vcpu.flags = 1 << 0; // VGCF_ONLINE
trace!("vcpu context: {:?}", vcpu);
setup.call.set_vcpu_context(setup.domid, 0, &vcpu)?;
setup.call.set_vcpu_context(setup.domid, 0, &vcpu).await?;
Ok(())
}
fn alloc_p2m_segment(
async fn alloc_p2m_segment(
&mut self,
_: &mut BootSetup,
_: &BootImageInfo,
@ -261,7 +274,7 @@ impl ArchBootSetup for Arm64BootSetup {
Ok(None)
}
fn alloc_page_tables(
async fn alloc_page_tables(
&mut self,
_: &mut BootSetup,
_: &BootImageInfo,
@ -269,7 +282,7 @@ impl ArchBootSetup for Arm64BootSetup {
Ok(None)
}
fn setup_page_tables(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
async fn setup_page_tables(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
Ok(())
}
}

View File

@ -4,6 +4,7 @@ use crate::sys::{GrantEntry, XEN_PAGE_SHIFT};
use crate::Error;
use libc::munmap;
use log::debug;
use nix::errno::Errno;
use slice_copy::copy;
use crate::mem::ARCH_PAGE_SHIFT;
@ -83,33 +84,34 @@ impl BootSetup<'_> {
}
}
fn initialize_memory(
async fn initialize_memory(
&mut self,
arch: &mut dyn ArchBootSetup,
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
total_pages: u64,
kernel_segment: &Option<DomainSegment>,
initrd_segment: &Option<DomainSegment>,
) -> Result<()> {
self.call.set_address_size(self.domid, 64)?;
arch.meminit(self, total_pages, kernel_segment, initrd_segment)?;
self.call.set_address_size(self.domid, 64).await?;
arch.meminit(self, total_pages, kernel_segment, initrd_segment)
.await?;
Ok(())
}
fn setup_hypercall_page(&mut self, image_info: &BootImageInfo) -> Result<()> {
async fn setup_hypercall_page(&mut self, image_info: &BootImageInfo) -> Result<()> {
if image_info.virt_hypercall == XEN_UNSET_ADDR {
return Ok(());
}
let pfn = (image_info.virt_hypercall - image_info.virt_base) >> ARCH_PAGE_SHIFT;
let mfn = self.phys.p2m[pfn as usize];
self.call.hypercall_init(self.domid, mfn)?;
self.call.hypercall_init(self.domid, mfn).await?;
Ok(())
}
pub fn initialize(
pub async fn initialize<I: BootImageLoader + Send + Sync>(
&mut self,
arch: &mut dyn ArchBootSetup,
image_loader: &dyn BootImageLoader,
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
image_loader: &I,
initrd: &[u8],
max_vcpus: u32,
mem_mb: u64,
@ -117,60 +119,68 @@ impl BootSetup<'_> {
) -> Result<BootState> {
debug!("initialize max_vcpus={:?} mem_mb={:?}", max_vcpus, mem_mb);
let page_size = arch.page_size();
let image_info = image_loader.parse()?;
debug!("initialize image_info={:?}", image_info);
let mut kernel_segment: Option<DomainSegment> = None;
let mut initrd_segment: Option<DomainSegment> = None;
if !image_info.unmapped_initrd {
initrd_segment = Some(self.alloc_module(arch, initrd)?);
initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
}
if arch.needs_early_kernel() {
kernel_segment = Some(self.load_kernel_segment(arch, image_loader, &image_info)?);
kernel_segment = Some(
self.load_kernel_segment(page_size, image_loader, &image_info)
.await?,
);
}
let total_pages = mem_mb << (20 - arch.page_shift());
self.initialize_memory(arch, total_pages, &kernel_segment, &initrd_segment)?;
self.initialize_memory(arch, total_pages, &kernel_segment, &initrd_segment)
.await?;
self.virt_alloc_end = image_info.virt_base;
if kernel_segment.is_none() {
kernel_segment = Some(self.load_kernel_segment(arch, image_loader, &image_info)?);
kernel_segment = Some(
self.load_kernel_segment(page_size, image_loader, &image_info)
.await?,
);
}
let mut p2m_segment: Option<DomainSegment> = None;
if image_info.virt_p2m_base >= image_info.virt_base
|| (image_info.virt_p2m_base & ((1 << arch.page_shift()) - 1)) != 0
{
p2m_segment = arch.alloc_p2m_segment(self, &image_info)?;
p2m_segment = arch.alloc_p2m_segment(self, &image_info).await?;
}
let start_info_segment = self.alloc_page(arch)?;
let xenstore_segment = self.alloc_page(arch)?;
let start_info_segment = self.alloc_page(page_size)?;
let xenstore_segment = self.alloc_page(page_size)?;
let mut consoles: Vec<(u32, DomainSegment)> = Vec::new();
for _ in 0..console_count {
let evtchn = self.call.evtchn_alloc_unbound(self.domid, 0)?;
let page = self.alloc_page(arch)?;
let evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;
let page = self.alloc_page(page_size)?;
consoles.push((evtchn, page));
}
let page_table_segment = arch.alloc_page_tables(self, &image_info)?;
let boot_stack_segment = self.alloc_page(arch)?;
let page_table_segment = arch.alloc_page_tables(self, &image_info).await?;
let boot_stack_segment = self.alloc_page(page_size)?;
if self.virt_pgtab_end > 0 {
self.alloc_padding_pages(arch, self.virt_pgtab_end)?;
self.alloc_padding_pages(page_size, self.virt_pgtab_end)?;
}
if p2m_segment.is_none() {
if let Some(mut segment) = arch.alloc_p2m_segment(self, &image_info)? {
if let Some(mut segment) = arch.alloc_p2m_segment(self, &image_info).await? {
segment.vstart = image_info.virt_p2m_base;
p2m_segment = Some(segment);
}
}
if image_info.unmapped_initrd {
initrd_segment = Some(self.alloc_module(arch, initrd)?);
initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
}
let initrd_segment = initrd_segment.unwrap();
let store_evtchn = self.call.evtchn_alloc_unbound(self.domid, 0)?;
let store_evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;
let kernel_segment =
kernel_segment.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
@ -192,35 +202,37 @@ impl BootSetup<'_> {
Ok(state)
}
pub fn boot(
pub async fn boot(
&mut self,
arch: &mut dyn ArchBootSetup,
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
state: &mut BootState,
cmdline: &str,
) -> Result<()> {
let domain_info = self.call.get_domain_info(self.domid)?;
let domain_info = self.call.get_domain_info(self.domid).await?;
let shared_info_frame = domain_info.shared_info_frame;
state.shared_info_frame = shared_info_frame;
arch.setup_page_tables(self, state)?;
arch.setup_start_info(self, state, cmdline)?;
self.setup_hypercall_page(&state.image_info)?;
arch.bootlate(self, state)?;
arch.setup_shared_info(self, state.shared_info_frame)?;
arch.vcpu(self, state)?;
arch.setup_page_tables(self, state).await?;
arch.setup_start_info(self, state, cmdline).await?;
self.setup_hypercall_page(&state.image_info).await?;
arch.bootlate(self, state).await?;
arch.setup_shared_info(self, state.shared_info_frame)
.await?;
arch.vcpu(self, state).await?;
self.phys.unmap_all()?;
self.gnttab_seed(state)?;
self.gnttab_seed(state).await?;
Ok(())
}
fn gnttab_seed(&mut self, state: &mut BootState) -> Result<()> {
async fn gnttab_seed(&mut self, state: &mut BootState) -> Result<()> {
let console_gfn =
self.phys.p2m[state.consoles.first().map(|x| x.1.pfn).unwrap_or(0) as usize];
let xenstore_gfn = self.phys.p2m[state.xenstore_segment.pfn as usize];
let addr = self
.call
.mmap(0, 1 << XEN_PAGE_SHIFT)
.await
.ok_or(Error::MmapFailed)?;
self.call.map_resource(self.domid, 1, 0, 0, 1, addr)?;
self.call.map_resource(self.domid, 1, 0, 0, 1, addr).await?;
let entries = unsafe { slice::from_raw_parts_mut(addr as *mut GrantEntry, 2) };
entries[0].flags = 1 << 0;
entries[0].domid = 0;
@ -231,23 +243,25 @@ impl BootSetup<'_> {
unsafe {
let result = munmap(addr as *mut c_void, 1 << XEN_PAGE_SHIFT);
if result != 0 {
return Err(Error::UnmapFailed);
return Err(Error::UnmapFailed(Errno::from_raw(result)));
}
}
Ok(())
}
fn load_kernel_segment(
async fn load_kernel_segment<I: BootImageLoader + Send + Sync>(
&mut self,
arch: &mut dyn ArchBootSetup,
image_loader: &dyn BootImageLoader,
page_size: u64,
image_loader: &I,
image_info: &BootImageInfo,
) -> Result<DomainSegment> {
let kernel_segment = self.alloc_segment(
arch,
let kernel_segment = self
.alloc_segment(
page_size,
image_info.virt_kstart,
image_info.virt_kend - image_info.virt_kstart,
)?;
)
.await?;
let kernel_segment_ptr = kernel_segment.addr as *mut u8;
let kernel_segment_slice =
unsafe { slice::from_raw_parts_mut(kernel_segment_ptr, kernel_segment.size as usize) };
@ -264,18 +278,19 @@ impl BootSetup<'_> {
(1 << bits) - 1
}
pub(crate) fn alloc_segment(
pub(crate) async fn alloc_segment(
&mut self,
arch: &mut dyn ArchBootSetup,
page_size: u64,
start: u64,
size: u64,
) -> Result<DomainSegment> {
debug!("alloc_segment {:#x} {:#x}", start, size);
if start > 0 {
self.alloc_padding_pages(arch, start)?;
self.alloc_padding_pages(page_size, start)?;
}
let page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
let pages = (size + page_size as u64 - 1) / page_size as u64;
let local_page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
let pages = (size + local_page_size as u64 - 1) / local_page_size as u64;
let start = self.virt_alloc_end;
let mut segment = DomainSegment {
@ -288,12 +303,12 @@ impl BootSetup<'_> {
pages,
};
self.chk_alloc_pages(arch, pages)?;
self.chk_alloc_pages(page_size, pages)?;
let ptr = self.phys.pfn_to_ptr(segment.pfn, pages)?;
let ptr = self.phys.pfn_to_ptr(segment.pfn, pages).await?;
segment.addr = ptr;
let slice = unsafe {
slice::from_raw_parts_mut(ptr as *mut u8, (pages * page_size as u64) as usize)
slice::from_raw_parts_mut(ptr as *mut u8, (pages * local_page_size as u64) as usize)
};
slice.fill(0);
segment.vend = self.virt_alloc_end;
@ -304,15 +319,15 @@ impl BootSetup<'_> {
Ok(segment)
}
fn alloc_page(&mut self, arch: &mut dyn ArchBootSetup) -> Result<DomainSegment> {
fn alloc_page(&mut self, page_size: u64) -> Result<DomainSegment> {
let start = self.virt_alloc_end;
let pfn = self.pfn_alloc_end;
self.chk_alloc_pages(arch, 1)?;
self.chk_alloc_pages(page_size, 1)?;
debug!("alloc_page {:#x} (pfn {:#x})", start, pfn);
Ok(DomainSegment {
vstart: start,
vend: (start + arch.page_size()) - 1,
vend: (start + page_size) - 1,
pfn,
addr: 0,
size: 0,
@ -321,31 +336,29 @@ impl BootSetup<'_> {
})
}
fn alloc_module(
&mut self,
arch: &mut dyn ArchBootSetup,
buffer: &[u8],
) -> Result<DomainSegment> {
let segment = self.alloc_segment(arch, 0, buffer.len() as u64)?;
async fn alloc_module(&mut self, page_size: u64, buffer: &[u8]) -> Result<DomainSegment> {
let segment = self
.alloc_segment(page_size, 0, buffer.len() as u64)
.await?;
let slice = unsafe { slice::from_raw_parts_mut(segment.addr as *mut u8, buffer.len()) };
copy(slice, buffer);
Ok(segment)
}
fn alloc_padding_pages(&mut self, arch: &mut dyn ArchBootSetup, boundary: u64) -> Result<()> {
if (boundary & (arch.page_size() - 1)) != 0 {
fn alloc_padding_pages(&mut self, page_size: u64, boundary: u64) -> Result<()> {
if (boundary & (page_size - 1)) != 0 {
return Err(Error::MemorySetupFailed("boundary is incorrect"));
}
if boundary < self.virt_alloc_end {
return Err(Error::MemorySetupFailed("boundary is below allocation end"));
}
let pages = (boundary - self.virt_alloc_end) / arch.page_size();
self.chk_alloc_pages(arch, pages)?;
let pages = (boundary - self.virt_alloc_end) / page_size;
self.chk_alloc_pages(page_size, pages)?;
Ok(())
}
fn chk_alloc_pages(&mut self, arch: &mut dyn ArchBootSetup, pages: u64) -> Result<()> {
fn chk_alloc_pages(&mut self, page_size: u64, pages: u64) -> Result<()> {
if pages > self.total_pages
|| self.pfn_alloc_end > self.total_pages
|| pages > self.total_pages - self.pfn_alloc_end
@ -354,47 +367,56 @@ impl BootSetup<'_> {
}
self.pfn_alloc_end += pages;
self.virt_alloc_end += pages * arch.page_size();
self.virt_alloc_end += pages * page_size;
Ok(())
}
}
#[async_trait::async_trait]
pub trait ArchBootSetup {
fn page_size(&mut self) -> u64;
fn page_shift(&mut self) -> u64;
fn needs_early_kernel(&mut self) -> bool;
fn alloc_p2m_segment(
async fn alloc_p2m_segment(
&mut self,
setup: &mut BootSetup,
image_info: &BootImageInfo,
) -> Result<Option<DomainSegment>>;
fn alloc_page_tables(
async fn alloc_page_tables(
&mut self,
setup: &mut BootSetup,
image_info: &BootImageInfo,
) -> Result<Option<DomainSegment>>;
fn setup_page_tables(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
async fn setup_page_tables(
&mut self,
setup: &mut BootSetup,
state: &mut BootState,
) -> Result<()>;
fn setup_start_info(
async fn setup_start_info(
&mut self,
setup: &mut BootSetup,
state: &BootState,
cmdline: &str,
) -> Result<()>;
fn setup_shared_info(&mut self, setup: &mut BootSetup, shared_info_frame: u64) -> Result<()>;
async fn setup_shared_info(
&mut self,
setup: &mut BootSetup,
shared_info_frame: u64,
) -> Result<()>;
fn meminit(
async fn meminit(
&mut self,
setup: &mut BootSetup,
total_pages: u64,
kernel_segment: &Option<DomainSegment>,
initrd_segment: &Option<DomainSegment>,
) -> Result<()>;
fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
async fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
}
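
ArchBootSetup turns into an async trait consumed as Box<dyn ArchBootSetup + Send + Sync>. A compact sketch of that #[async_trait] pattern, assuming the async-trait, anyhow, and tokio crates; ArchSetup and X86Setup are illustrative names, not the real krata types:

use async_trait::async_trait;

#[async_trait]
trait ArchSetup {
    fn page_size(&mut self) -> u64;
    async fn meminit(&mut self, total_pages: u64) -> anyhow::Result<()>;
}

struct X86Setup;

#[async_trait]
impl ArchSetup for X86Setup {
    fn page_size(&mut self) -> u64 {
        4096
    }

    async fn meminit(&mut self, total_pages: u64) -> anyhow::Result<()> {
        // real code would issue async hypercalls to claim and populate memory here
        println!("claiming {} pages", total_pages);
        Ok(())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Boxing as dyn Trait + Send + Sync lets the caller pick the arch at runtime
    // and hold the setup object across .await points inside spawned tasks.
    let mut arch = Box::new(X86Setup) as Box<dyn ArchSetup + Send + Sync>;
    let _ = arch.page_size();
    arch.meminit(1024).await
}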

View File

@ -22,8 +22,8 @@ pub enum Error {
ElfParseFailed(#[from] elf::ParseError),
#[error("mmap failed")]
MmapFailed,
#[error("munmap failed")]
UnmapFailed,
#[error("munmap failed: {0}")]
UnmapFailed(nix::errno::Errno),
#[error("memory setup failed: {0}")]
MemorySetupFailed(&'static str),
#[error("populate physmap failed: wanted={0}, received={1}, input_extents={2}")]

View File

@ -16,7 +16,7 @@ pub mod arm64;
#[cfg(target_arch = "aarch64")]
use crate::arm64::Arm64BootSetup;
use crate::boot::BootSetup;
use crate::boot::{ArchBootSetup, BootSetup};
use crate::elfloader::ElfImageLoader;
use crate::error::{Error, Result};
use boot::BootState;
@ -34,6 +34,7 @@ use xenstore::{
XsPermission, XsdClient, XsdInterface, XS_PERM_NONE, XS_PERM_READ, XS_PERM_READ_WRITE,
};
#[derive(Clone)]
pub struct XenClient {
pub store: XsdClient,
call: XenCall,
@ -115,7 +116,7 @@ impl XenClient {
Ok(XenClient { store, call })
}
pub async fn create(&mut self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
pub async fn create(&self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
let mut domain = CreateDomain {
max_vcpus: config.max_vcpus,
..Default::default()
@ -125,7 +126,7 @@ impl XenClient {
domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP;
}
let domid = self.call.create_domain(domain)?;
let domid = self.call.create_domain(domain).await?;
match self.init(domid, &domain, config).await {
Ok(created) => Ok(created),
Err(err) => {
@ -138,7 +139,7 @@ impl XenClient {
}
async fn init(
&mut self,
&self,
domid: u32,
domain: &CreateDomain,
config: &DomainConfig<'_>,
@ -253,8 +254,8 @@ impl XenClient {
tx.commit().await?;
}
self.call.set_max_vcpus(domid, config.max_vcpus)?;
self.call.set_max_mem(domid, config.mem_mb * 1024)?;
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
let image_loader = ElfImageLoader::load_file_kernel(config.kernel_path)?;
let xenstore_evtchn: u32;
@ -265,19 +266,21 @@ impl XenClient {
{
let mut boot = BootSetup::new(&self.call, domid);
#[cfg(target_arch = "x86_64")]
let mut arch = X86BootSetup::new();
let mut arch = Box::new(X86BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
#[cfg(target_arch = "aarch64")]
let mut arch = Arm64BootSetup::new();
let mut arch = Box::new(Arm64BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
let initrd = read(config.initrd_path)?;
state = boot.initialize(
state = boot
.initialize(
&mut arch,
&image_loader,
initrd.as_slice(),
config.max_vcpus,
config.mem_mb,
1,
)?;
boot.boot(&mut arch, &mut state, config.cmdline)?;
)
.await?;
boot.boot(&mut arch, &mut state, config.cmdline).await?;
xenstore_evtchn = state.store_evtchn;
xenstore_mfn = boot.phys.p2m[state.xenstore_segment.pfn as usize];
p2m = boot.phys.p2m;
@@ -418,7 +421,8 @@ impl XenClient {
for channel in &config.event_channels {
let id = self
.call
.evtchn_alloc_unbound(domid, config.backend_domid)?;
.evtchn_alloc_unbound(domid, config.backend_domid)
.await?;
let channel_path = format!("{}/evtchn/{}", dom_path, channel.name);
self.store
.write_string(&format!("{}/name", channel_path), channel.name)
@@ -428,12 +432,12 @@ impl XenClient {
.await?;
}
self.call.unpause_domain(domid)?;
self.call.unpause_domain(domid).await?;
Ok(CreatedDomain { domid, channels })
}
async fn disk_device_add(
&mut self,
&self,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
@@ -486,7 +490,7 @@ impl XenClient {
#[allow(clippy::too_many_arguments, clippy::unnecessary_unwrap)]
async fn console_device_add(
&mut self,
&self,
channel: &DomainChannel,
p2m: &[u64],
state: &BootState,
@@ -553,7 +557,7 @@ impl XenClient {
}
async fn fs_9p_device_add(
&mut self,
&self,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
@@ -591,7 +595,7 @@ impl XenClient {
}
async fn vif_device_add(
&mut self,
&self,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
@@ -650,7 +654,7 @@ impl XenClient {
#[allow(clippy::too_many_arguments)]
async fn device_add(
&mut self,
&self,
typ: &str,
id: u64,
dom_path: &str,
@@ -713,15 +717,15 @@ impl XenClient {
Ok(())
}
pub async fn destroy(&mut self, domid: u32) -> Result<()> {
pub async fn destroy(&self, domid: u32) -> Result<()> {
if let Err(err) = self.destroy_store(domid).await {
warn!("failed to destroy store for domain {}: {}", domid, err);
}
self.call.destroy_domain(domid)?;
self.call.destroy_domain(domid).await?;
Ok(())
}
async fn destroy_store(&mut self, domid: u32) -> Result<()> {
async fn destroy_store(&self, domid: u32) -> Result<()> {
let dom_path = self.store.get_domain_path(domid).await?;
let vm_path = self.store.read_string(&format!("{}/vm", dom_path)).await?;
if vm_path.is_none() {
@@ -813,7 +817,7 @@ impl XenClient {
Ok(())
}
pub async fn get_console_path(&mut self, domid: u32) -> Result<String> {
pub async fn get_console_path(&self, domid: u32) -> Result<String> {
let dom_path = self.store.get_domain_path(domid).await?;
let console_tty_path = format!("{}/console/tty", dom_path);
let mut tty: Option<String> = None;
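
With XenClient now Clone and every method taking &self, several domains can be driven at once from one shared handle, which is the direction the parallel-reconciliation work is headed. A rough sketch of one way a caller could use it (the surrounding crate's Result alias and the two configs are assumed to be in scope; nothing here is part of the diff itself):

async fn create_pair(
    client: &XenClient,
    a: &DomainConfig<'_>,
    b: &DomainConfig<'_>,
) -> Result<(CreatedDomain, CreatedDomain)> {
    // Both creations make progress concurrently on the same client; with the old
    // &mut self signatures each call would have required exclusive access.
    let (left, right) = tokio::join!(client.create(a), client.create(b));
    Ok((left?, right?))
}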

View File

@@ -3,6 +3,7 @@ use crate::sys::{XEN_PAGE_SHIFT, XEN_PAGE_SIZE};
use crate::Error;
use libc::munmap;
use log::debug;
use nix::errno::Errno;
use std::ffi::c_void;
#[cfg(target_arch = "aarch64")]
@@ -45,7 +46,7 @@ impl PhysicalPages<'_> {
self.p2m.len() as u64
}
pub fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
pub async fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
for page in &self.pages {
if pfn >= page.pfn + page.count {
continue;
@@ -76,10 +77,10 @@ impl PhysicalPages<'_> {
return Err(Error::MemorySetupFailed("page count is zero"));
}
self.pfn_alloc(pfn, count)
self.pfn_alloc(pfn, count).await
}
fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
async fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
let mut entries = vec![MmapEntry::default(); count as usize];
for (i, entry) in entries.iter_mut().enumerate() {
entry.mfn = self.p2m[pfn as usize + i];
@@ -98,9 +99,13 @@ impl PhysicalPages<'_> {
let addr = self
.call
.mmap(0, actual_mmap_len)
.await
.ok_or(Error::MmapFailed)?;
debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
let result = self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
let result = self
.call
.mmap_batch(self.domid, num as u64, addr, pfns)
.await?;
if result != 0 {
return Err(Error::MmapFailed);
}
@@ -117,7 +122,7 @@ impl PhysicalPages<'_> {
Ok(addr)
}
pub fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
let num = ((size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT) as usize;
let mut pfns = vec![u64::MAX; num];
for (i, item) in pfns.iter_mut().enumerate().take(num) {
@@ -128,9 +133,13 @@ impl PhysicalPages<'_> {
let addr = self
.call
.mmap(0, actual_mmap_len)
.await
.ok_or(Error::MmapFailed)?;
debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
let result = self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
let result = self
.call
.mmap_batch(self.domid, num as u64, addr, pfns)
.await?;
if result != 0 {
return Err(Error::MmapFailed);
}
@@ -155,7 +164,7 @@ impl PhysicalPages<'_> {
(page.count << ARCH_PAGE_SHIFT) as usize,
);
if err != 0 {
return Err(Error::UnmapFailed);
return Err(Error::UnmapFailed(Errno::from_raw(err)));
}
}
}
@@ -181,7 +190,7 @@ impl PhysicalPages<'_> {
page.ptr
);
if err != 0 {
return Err(Error::UnmapFailed);
return Err(Error::UnmapFailed(Errno::from_raw(err)));
}
self.pages.remove(i);
}
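
map_foreign_pages above rounds the requested size up to whole Xen pages before mmapping. A tiny standalone illustration of that arithmetic, assuming XEN_PAGE_SHIFT and XEN_PAGE_SIZE are 12 and 4096:

const XEN_PAGE_SHIFT: u64 = 12;
const XEN_PAGE_SIZE: u64 = 1 << XEN_PAGE_SHIFT;

// Mirrors the ((size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT) expression above.
fn pages_for(size: u64) -> u64 {
    (size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT
}

fn main() {
    assert_eq!(pages_for(1), 1);
    assert_eq!(pages_for(4096), 1);
    assert_eq!(pages_for(10_000), 3); // partial pages round up to whole 4 KiB frames
    println!("ok");
}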

View File

@@ -275,6 +275,7 @@ impl X86BootSetup {
}
}
#[async_trait::async_trait]
impl ArchBootSetup for X86BootSetup {
fn page_size(&mut self) -> u64 {
X86_PAGE_SIZE
@@ -288,7 +289,7 @@ impl ArchBootSetup for X86BootSetup {
false
}
fn alloc_p2m_segment(
async fn alloc_p2m_segment(
&mut self,
setup: &mut BootSetup,
image_info: &BootImageInfo,
@@ -310,11 +311,13 @@ impl ArchBootSetup for X86BootSetup {
}
self.table.mappings_count += 1;
p2m_alloc_size += (pgtables << X86_PAGE_SHIFT) as u64;
let p2m_segment = setup.alloc_segment(self, 0, p2m_alloc_size)?;
let p2m_segment = setup
.alloc_segment(self.page_size(), 0, p2m_alloc_size)
.await?;
Ok(Some(p2m_segment))
}
fn alloc_page_tables(
async fn alloc_page_tables(
&mut self,
setup: &mut BootSetup,
image_info: &BootImageInfo,
@@ -341,7 +344,7 @@ impl ArchBootSetup for X86BootSetup {
self.table.mappings_count += 1;
setup.virt_pgtab_end = try_virt_end + 1;
let size = self.table.mappings[m].area.pgtables as u64 * X86_PAGE_SIZE;
let segment = setup.alloc_segment(self, 0, size)?;
let segment = setup.alloc_segment(self.page_size(), 0, size).await?;
debug!(
"alloc_page_tables table={:?} segment={:?}",
self.table, segment
@@ -349,7 +352,11 @@ impl ArchBootSetup for X86BootSetup {
Ok(Some(segment))
}
fn setup_page_tables(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
async fn setup_page_tables(
&mut self,
setup: &mut BootSetup,
state: &mut BootState,
) -> Result<()> {
let p2m_segment = state
.p2m_segment
.as_ref()
@@ -364,7 +371,7 @@ impl ArchBootSetup for X86BootSetup {
let map1 = &self.table.mappings[m1];
let from = map1.levels[l].from;
let to = map1.levels[l].to;
let pg_ptr = setup.phys.pfn_to_ptr(map1.levels[l].pfn, 0)? as *mut u64;
let pg_ptr = setup.phys.pfn_to_ptr(map1.levels[l].pfn, 0).await? as *mut u64;
for m2 in 0usize..self.table.mappings_count {
let map2 = &self.table.mappings[m2];
let lvl = if l > 0 {
@@ -407,13 +414,16 @@ impl ArchBootSetup for X86BootSetup {
Ok(())
}
fn setup_start_info(
async fn setup_start_info(
&mut self,
setup: &mut BootSetup,
state: &BootState,
cmdline: &str,
) -> Result<()> {
let ptr = setup.phys.pfn_to_ptr(state.start_info_segment.pfn, 1)?;
let ptr = setup
.phys
.pfn_to_ptr(state.start_info_segment.pfn, 1)
.await?;
let byte_slice =
unsafe { slice::from_raw_parts_mut(ptr as *mut u8, X86_PAGE_SIZE as usize) };
byte_slice.fill(0);
@@ -456,11 +466,15 @@ impl ArchBootSetup for X86BootSetup {
Ok(())
}
fn setup_shared_info(&mut self, setup: &mut BootSetup, shared_info_frame: u64) -> Result<()> {
async fn setup_shared_info(
&mut self,
setup: &mut BootSetup,
shared_info_frame: u64,
) -> Result<()> {
let info = setup
.phys
.map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)?
as *mut SharedInfo;
.map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)
.await? as *mut SharedInfo;
unsafe {
let size = size_of::<SharedInfo>();
let info_as_buff = slice::from_raw_parts_mut(info as *mut u8, size);
@@ -473,14 +487,14 @@ impl ArchBootSetup for X86BootSetup {
Ok(())
}
fn meminit(
async fn meminit(
&mut self,
setup: &mut BootSetup,
total_pages: u64,
_: &Option<DomainSegment>,
_: &Option<DomainSegment>,
) -> Result<()> {
setup.call.claim_pages(setup.domid, total_pages)?;
setup.call.claim_pages(setup.domid, total_pages).await?;
let mut vmemranges: Vec<VmemRange> = Vec::new();
let stub = VmemRange {
start: 0,
@@ -530,13 +544,16 @@ impl ArchBootSetup for X86BootSetup {
}
let extents_init_slice = extents_init.as_slice();
let extents = setup.call.populate_physmap(
let extents = setup
.call
.populate_physmap(
setup.domid,
count,
SUPERPAGE_2MB_SHIFT as u32,
0,
&extents_init_slice[0usize..count as usize],
)?;
)
.await?;
pfn = pfn_base_idx;
for mfn in extents {
@@ -558,10 +575,10 @@ impl ArchBootSetup for X86BootSetup {
let p2m_idx = (pfn_base + j) as usize;
let p2m_end_idx = p2m_idx + allocsz as usize;
let input_extent_starts = &p2m[p2m_idx..p2m_end_idx];
let result =
setup
let result = setup
.call
.populate_physmap(setup.domid, allocsz, 0, 0, input_extent_starts)?;
.populate_physmap(setup.domid, allocsz, 0, 0, input_extent_starts)
.await?;
if result.len() != allocsz as usize {
return Err(Error::PopulatePhysmapFailed(
@@ -581,11 +598,11 @@ impl ArchBootSetup for X86BootSetup {
}
setup.phys.load_p2m(p2m);
setup.call.claim_pages(setup.domid, 0)?;
setup.call.claim_pages(setup.domid, 0).await?;
Ok(())
}
fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
async fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
let p2m_segment = state
.p2m_segment
.as_ref()
@@ -600,11 +617,12 @@ impl ArchBootSetup for X86BootSetup {
setup.phys.unmap(p2m_segment.pfn)?;
setup
.call
.mmuext(setup.domid, MMUEXT_PIN_L4_TABLE, pg_mfn, 0)?;
.mmuext(setup.domid, MMUEXT_PIN_L4_TABLE, pg_mfn, 0)
.await?;
Ok(())
}
fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
let page_table_segment = state
.page_table_segment
.as_ref()
@@ -633,7 +651,7 @@ impl ArchBootSetup for X86BootSetup {
vcpu.kernel_ss = vcpu.user_regs.ss as u64;
vcpu.kernel_sp = vcpu.user_regs.rsp;
trace!("vcpu context: {:?}", vcpu);
setup.call.set_vcpu_context(setup.domid, 0, &vcpu)?;
setup.call.set_vcpu_context(setup.domid, 0, &vcpu).await?;
Ok(())
}
}
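
meminit above asks populate_physmap for memory both at the 2 MiB superpage order (SUPERPAGE_2MB_SHIFT) and at order 0 for smaller runs. A small worked example of the superpage sizing, assuming SUPERPAGE_2MB_SHIFT is 9, i.e. 512 four-KiB frames per extent:

const SUPERPAGE_2MB_SHIFT: u64 = 9;
const SUPERPAGE_2MB_NR_PFNS: u64 = 1 << SUPERPAGE_2MB_SHIFT;

fn main() {
    // 64 MiB of guest memory expressed in 4 KiB pages...
    let pages: u64 = 64 * 1024 * 1024 / 4096;
    // ...fits in this many 2 MiB extents when populated at superpage order.
    let extents = pages / SUPERPAGE_2MB_NR_PFNS;
    assert_eq!(extents, 32);
    println!("{} pages -> {} superpage extents", pages, extents);
}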

View File

@@ -168,7 +168,7 @@ impl XsdClient {
})
}
pub async fn get_domain_path(&mut self, domid: u32) -> Result<String> {
pub async fn get_domain_path(&self, domid: u32) -> Result<String> {
let response = self
.socket
.send(0, XSD_GET_DOMAIN_PATH, &[&domid.to_string()])
@@ -176,7 +176,7 @@ impl XsdClient {
response.parse_string()
}
pub async fn introduce_domain(&mut self, domid: u32, mfn: u64, evtchn: u32) -> Result<bool> {
pub async fn introduce_domain(&self, domid: u32, mfn: u64, evtchn: u32) -> Result<bool> {
trace!("introduce domain domid={domid} mfn={mfn} evtchn={evtchn}");
let response = self
.socket