Mirror of https://github.com/edera-dev/krata.git (synced 2025-08-03 13:11:31 +00:00)
krata: work on parallel reconciliation
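This change converts the krata-xencall hypercall surface to async: XenCall now derives Clone, holds its privcmd handle as Arc<File>, and serializes the raw ioctls behind a tokio Semaphore, while the xenclient boot path awaits those calls so reconciliation work can be driven from several tasks at once. A minimal usage sketch of the new API, assuming the crate layout shown in the examples below and tokio with the rt and macros features (not part of this commit):

    use xencall::error::Result;
    use xencall::sys::CreateDomain;
    use xencall::XenCall;

    #[tokio::main]
    async fn main() -> Result<()> {
        // XenCall::open returns a handle whose hypercall methods are now async.
        let call = XenCall::open(0)?;
        // Drive two domctls concurrently from one task; the internal
        // Semaphore::new(1) still serializes the underlying privcmd ioctls.
        let (a, b) = tokio::join!(
            call.create_domain(CreateDomain::default()),
            call.create_domain(CreateDomain::default()),
        );
        println!("created domains {} and {}", a?, b?);
        Ok(())
    }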
@@ -13,6 +13,7 @@ libc = { workspace = true }
log = { workspace = true }
nix = { workspace = true, features = ["ioctl"] }
thiserror = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }

[lib]

@@ -2,11 +2,12 @@ use xencall::error::Result;
use xencall::sys::CreateDomain;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();

let call = XenCall::open(0)?;
let domid = call.create_domain(CreateDomain::default())?;
let domid = call.create_domain(CreateDomain::default()).await?;
println!("created domain {}", domid);
Ok(())
}

@@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();

let call = XenCall::open(0)?;
let info = call.get_domain_info(1)?;
let info = call.get_domain_info(1).await?;
println!("{:?}", info);
Ok(())
}

@@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();

let call = XenCall::open(0)?;
let context = call.get_vcpu_context(224, 0)?;
let context = call.get_vcpu_context(224, 0).await?;
println!("{:?}", context);
Ok(())
}

@@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();

let call = XenCall::open(0)?;
let info = call.get_version_capabilities()?;
let info = call.get_version_capabilities().await?;
println!("{:?}", info);
Ok(())
}

@@ -8,8 +8,12 @@ pub enum Error {
Kernel(#[from] nix::errno::Errno),
#[error("io issue encountered: {0}")]
Io(#[from] io::Error),
#[error("failed to acquire semaphore: {0}")]
AcquireSemaphoreFailed(#[from] tokio::sync::AcquireError),
#[error("populate physmap failed")]
PopulatePhysmapFailed,
#[error("mmap batch failed: {0}")]
MmapBatchFailed(nix::errno::Errno),
}

pub type Result<T> = std::result::Result<T, Error>;

@@ -18,15 +18,19 @@ use libc::{c_int, mmap, usleep, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use log::trace;
use nix::errno::Errno;
use std::ffi::{c_long, c_uint, c_ulong, c_void};
use std::sync::Arc;
use sys::{XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION};
use tokio::sync::Semaphore;

use std::fs::{File, OpenOptions};
use std::os::fd::AsRawFd;
use std::ptr::addr_of_mut;
use std::slice;

#[derive(Clone)]
pub struct XenCall {
pub handle: File,
pub handle: Arc<File>,
semaphore: Arc<Semaphore>,
domctl_interface_version: u32,
}

@@ -39,7 +43,8 @@ impl XenCall {
let domctl_interface_version =
XenCall::detect_domctl_interface_version(&handle, current_domid)?;
Ok(XenCall {
handle,
handle: Arc::new(handle),
semaphore: Arc::new(Semaphore::new(1)),
domctl_interface_version,
})
}

@@ -68,7 +73,8 @@ impl XenCall {
Err(Error::XenVersionUnsupported)
}

pub fn mmap(&self, addr: u64, len: u64) -> Option<u64> {
pub async fn mmap(&self, addr: u64, len: u64) -> Option<u64> {
let _permit = self.semaphore.acquire().await.ok()?;
trace!(
"call fd={} mmap addr={:#x} len={}",
self.handle.as_raw_fd(),
@@ -99,7 +105,8 @@ impl XenCall {
}
}

pub fn hypercall(&self, op: c_ulong, arg: [c_ulong; 5]) -> Result<c_long> {
pub async fn hypercall(&self, op: c_ulong, arg: [c_ulong; 5]) -> Result<c_long> {
let _permit = self.semaphore.acquire().await?;
trace!(
"call fd={} hypercall op={:#x} arg={:?}",
self.handle.as_raw_fd(),
@ -113,29 +120,29 @@ impl XenCall {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hypercall0(&self, op: c_ulong) -> Result<c_long> {
|
||||
self.hypercall(op, [0, 0, 0, 0, 0])
|
||||
pub async fn hypercall0(&self, op: c_ulong) -> Result<c_long> {
|
||||
self.hypercall(op, [0, 0, 0, 0, 0]).await
|
||||
}
|
||||
|
||||
pub fn hypercall1(&self, op: c_ulong, arg1: c_ulong) -> Result<c_long> {
|
||||
self.hypercall(op, [arg1, 0, 0, 0, 0])
|
||||
pub async fn hypercall1(&self, op: c_ulong, arg1: c_ulong) -> Result<c_long> {
|
||||
self.hypercall(op, [arg1, 0, 0, 0, 0]).await
|
||||
}
|
||||
|
||||
pub fn hypercall2(&self, op: c_ulong, arg1: c_ulong, arg2: c_ulong) -> Result<c_long> {
|
||||
self.hypercall(op, [arg1, arg2, 0, 0, 0])
|
||||
pub async fn hypercall2(&self, op: c_ulong, arg1: c_ulong, arg2: c_ulong) -> Result<c_long> {
|
||||
self.hypercall(op, [arg1, arg2, 0, 0, 0]).await
|
||||
}
|
||||
|
||||
pub fn hypercall3(
|
||||
pub async fn hypercall3(
|
||||
&self,
|
||||
op: c_ulong,
|
||||
arg1: c_ulong,
|
||||
arg2: c_ulong,
|
||||
arg3: c_ulong,
|
||||
) -> Result<c_long> {
|
||||
self.hypercall(op, [arg1, arg2, arg3, 0, 0])
|
||||
self.hypercall(op, [arg1, arg2, arg3, 0, 0]).await
|
||||
}
|
||||
|
||||
pub fn hypercall4(
|
||||
pub async fn hypercall4(
|
||||
&self,
|
||||
op: c_ulong,
|
||||
arg1: c_ulong,
|
||||
@ -143,10 +150,10 @@ impl XenCall {
|
||||
arg3: c_ulong,
|
||||
arg4: c_ulong,
|
||||
) -> Result<c_long> {
|
||||
self.hypercall(op, [arg1, arg2, arg3, arg4, 0])
|
||||
self.hypercall(op, [arg1, arg2, arg3, arg4, 0]).await
|
||||
}
|
||||
|
||||
pub fn hypercall5(
|
||||
pub async fn hypercall5(
|
||||
&self,
|
||||
op: c_ulong,
|
||||
arg1: c_ulong,
|
||||
@ -155,10 +162,10 @@ impl XenCall {
|
||||
arg4: c_ulong,
|
||||
arg5: c_ulong,
|
||||
) -> Result<c_long> {
|
||||
self.hypercall(op, [arg1, arg2, arg3, arg4, arg5])
|
||||
self.hypercall(op, [arg1, arg2, arg3, arg4, arg5]).await
|
||||
}
|
||||
|
||||
pub fn multicall(&self, calls: &mut [MultiCallEntry]) -> Result<()> {
|
||||
pub async fn multicall(&self, calls: &mut [MultiCallEntry]) -> Result<()> {
|
||||
trace!(
|
||||
"call fd={} multicall calls={:?}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -168,11 +175,12 @@ impl XenCall {
|
||||
HYPERVISOR_MULTICALL,
|
||||
calls.as_mut_ptr() as c_ulong,
|
||||
calls.len() as c_ulong,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn map_resource(
|
||||
pub async fn map_resource(
|
||||
&self,
|
||||
domid: u32,
|
||||
typ: u32,
|
||||
@ -181,6 +189,7 @@ impl XenCall {
|
||||
num: u64,
|
||||
addr: u64,
|
||||
) -> Result<()> {
|
||||
let _permit = self.semaphore.acquire().await?;
|
||||
let mut resource = MmapResource {
|
||||
dom: domid as u16,
|
||||
typ,
|
||||
@ -195,7 +204,14 @@ impl XenCall {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn mmap_batch(&self, domid: u32, num: u64, addr: u64, mfns: Vec<u64>) -> Result<c_long> {
|
||||
pub async fn mmap_batch(
|
||||
&self,
|
||||
domid: u32,
|
||||
num: u64,
|
||||
addr: u64,
|
||||
mfns: Vec<u64>,
|
||||
) -> Result<c_long> {
|
||||
let _permit = self.semaphore.acquire().await?;
|
||||
trace!(
|
||||
"call fd={} mmap_batch domid={} num={} addr={:#x} mfns={:?}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -218,7 +234,7 @@ impl XenCall {
|
||||
let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
|
||||
if let Err(errno) = result {
|
||||
if errno != Errno::ENOENT {
|
||||
return Err(errno)?;
|
||||
return Err(Error::MmapBatchFailed(errno))?;
|
||||
}
|
||||
|
||||
usleep(100);
|
||||
@ -253,7 +269,7 @@ impl XenCall {
|
||||
let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
|
||||
if let Err(n) = result {
|
||||
if n != Errno::ENOENT {
|
||||
return Err(n)?;
|
||||
return Err(Error::MmapBatchFailed(n))?;
|
||||
}
|
||||
}
|
||||
|
||||
@ -273,7 +289,7 @@ impl XenCall {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_version_capabilities(&self) -> Result<XenCapabilitiesInfo> {
|
||||
pub async fn get_version_capabilities(&self) -> Result<XenCapabilitiesInfo> {
|
||||
trace!(
|
||||
"call fd={} get_version_capabilities",
|
||||
self.handle.as_raw_fd()
|
||||
@ -285,26 +301,29 @@ impl XenCall {
|
||||
HYPERVISOR_XEN_VERSION,
|
||||
XENVER_CAPABILITIES,
|
||||
addr_of_mut!(info) as c_ulong,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
Ok(info)
|
||||
}
|
||||
|
||||
pub fn evtchn_op(&self, cmd: c_int, arg: u64) -> Result<()> {
|
||||
self.hypercall2(HYPERVISOR_EVENT_CHANNEL_OP, cmd as c_ulong, arg)?;
|
||||
pub async fn evtchn_op(&self, cmd: c_int, arg: u64) -> Result<()> {
|
||||
self.hypercall2(HYPERVISOR_EVENT_CHANNEL_OP, cmd as c_ulong, arg)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn evtchn_alloc_unbound(&self, domid: u32, remote_domid: u32) -> Result<u32> {
|
||||
pub async fn evtchn_alloc_unbound(&self, domid: u32, remote_domid: u32) -> Result<u32> {
|
||||
let mut alloc_unbound = EvtChnAllocUnbound {
|
||||
dom: domid as u16,
|
||||
remote_dom: remote_domid as u16,
|
||||
port: 0,
|
||||
};
|
||||
self.evtchn_op(6, addr_of_mut!(alloc_unbound) as c_ulong)?;
|
||||
self.evtchn_op(6, addr_of_mut!(alloc_unbound) as c_ulong)
|
||||
.await?;
|
||||
Ok(alloc_unbound.port)
|
||||
}
|
||||
|
||||
pub fn get_domain_info(&self, domid: u32) -> Result<GetDomainInfo> {
|
||||
pub async fn get_domain_info(&self, domid: u32) -> Result<GetDomainInfo> {
|
||||
trace!(
|
||||
"domctl fd={} get_domain_info domid={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -318,11 +337,12 @@ impl XenCall {
|
||||
get_domain_info: GetDomainInfo::default(),
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(unsafe { domctl.value.get_domain_info })
|
||||
}
|
||||
|
||||
pub fn create_domain(&self, create_domain: CreateDomain) -> Result<u32> {
|
||||
pub async fn create_domain(&self, create_domain: CreateDomain) -> Result<u32> {
|
||||
trace!(
|
||||
"domctl fd={} create_domain create_domain={:?}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -334,11 +354,12 @@ impl XenCall {
|
||||
domid: 0,
|
||||
value: DomCtlValue { create_domain },
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(domctl.domid)
|
||||
}
|
||||
|
||||
pub fn pause_domain(&self, domid: u32) -> Result<()> {
|
||||
pub async fn pause_domain(&self, domid: u32) -> Result<()> {
|
||||
trace!(
|
||||
"domctl fd={} pause_domain domid={:?}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -350,11 +371,12 @@ impl XenCall {
|
||||
domid,
|
||||
value: DomCtlValue { pad: [0; 128] },
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn unpause_domain(&self, domid: u32) -> Result<()> {
|
||||
pub async fn unpause_domain(&self, domid: u32) -> Result<()> {
|
||||
trace!(
|
||||
"domctl fd={} unpause_domain domid={:?}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -366,11 +388,12 @@ impl XenCall {
|
||||
domid,
|
||||
value: DomCtlValue { pad: [0; 128] },
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_max_mem(&self, domid: u32, memkb: u64) -> Result<()> {
|
||||
pub async fn set_max_mem(&self, domid: u32, memkb: u64) -> Result<()> {
|
||||
trace!(
|
||||
"domctl fd={} set_max_mem domid={} memkb={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -385,11 +408,12 @@ impl XenCall {
|
||||
max_mem: MaxMem { max_memkb: memkb },
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_max_vcpus(&self, domid: u32, max_vcpus: u32) -> Result<()> {
|
||||
pub async fn set_max_vcpus(&self, domid: u32, max_vcpus: u32) -> Result<()> {
|
||||
trace!(
|
||||
"domctl fd={} set_max_vcpus domid={} max_vcpus={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -404,11 +428,12 @@ impl XenCall {
|
||||
max_cpus: MaxVcpus { max_vcpus },
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_address_size(&self, domid: u32, size: u32) -> Result<()> {
|
||||
pub async fn set_address_size(&self, domid: u32, size: u32) -> Result<()> {
|
||||
trace!(
|
||||
"domctl fd={} set_address_size domid={} size={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -423,11 +448,12 @@ impl XenCall {
|
||||
address_size: AddressSize { size },
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
|
||||
pub async fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
|
||||
trace!(
|
||||
"domctl fd={} get_vcpu_context domid={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -447,11 +473,12 @@ impl XenCall {
|
||||
},
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(unsafe { wrapper.value })
|
||||
}
|
||||
|
||||
pub fn set_vcpu_context(
|
||||
pub async fn set_vcpu_context(
|
||||
&self,
|
||||
domid: u32,
|
||||
vcpu: u32,
|
||||
@ -476,11 +503,12 @@ impl XenCall {
|
||||
},
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_page_frame_info(&self, domid: u32, frames: &[u64]) -> Result<Vec<u64>> {
|
||||
pub async fn get_page_frame_info(&self, domid: u32, frames: &[u64]) -> Result<Vec<u64>> {
|
||||
let mut buffer: Vec<u64> = frames.to_vec();
|
||||
let mut domctl = DomCtl {
|
||||
cmd: XEN_DOMCTL_GETPAGEFRAMEINFO3,
|
||||
@ -493,7 +521,8 @@ impl XenCall {
|
||||
},
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
let slice = unsafe {
|
||||
slice::from_raw_parts_mut(
|
||||
domctl.value.get_page_frame_info.array as *mut u64,
|
||||
@ -503,7 +532,7 @@ impl XenCall {
|
||||
Ok(slice.to_vec())
|
||||
}
|
||||
|
||||
pub fn hypercall_init(&self, domid: u32, gmfn: u64) -> Result<()> {
|
||||
pub async fn hypercall_init(&self, domid: u32, gmfn: u64) -> Result<()> {
|
||||
trace!(
|
||||
"domctl fd={} hypercall_init domid={} gmfn={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -518,11 +547,12 @@ impl XenCall {
|
||||
hypercall_init: HypercallInit { gmfn },
|
||||
},
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn destroy_domain(&self, domid: u32) -> Result<()> {
|
||||
pub async fn destroy_domain(&self, domid: u32) -> Result<()> {
|
||||
trace!(
|
||||
"domctl fd={} destroy_domain domid={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -534,11 +564,12 @@ impl XenCall {
|
||||
domid,
|
||||
value: DomCtlValue { pad: [0; 128] },
|
||||
};
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
|
||||
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_memory_map(&self, size_of_entry: usize) -> Result<Vec<u8>> {
|
||||
pub async fn get_memory_map(&self, size_of_entry: usize) -> Result<Vec<u8>> {
|
||||
let mut memory_map = MemoryMap {
|
||||
count: 0,
|
||||
buffer: 0,
|
||||
@ -547,18 +578,20 @@ impl XenCall {
|
||||
HYPERVISOR_MEMORY_OP,
|
||||
XEN_MEM_MEMORY_MAP as c_ulong,
|
||||
addr_of_mut!(memory_map) as c_ulong,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
let mut buffer = vec![0u8; memory_map.count as usize * size_of_entry];
|
||||
memory_map.buffer = buffer.as_mut_ptr() as c_ulong;
|
||||
self.hypercall2(
|
||||
HYPERVISOR_MEMORY_OP,
|
||||
XEN_MEM_MEMORY_MAP as c_ulong,
|
||||
addr_of_mut!(memory_map) as c_ulong,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
Ok(buffer)
|
||||
}
|
||||
|
||||
pub fn populate_physmap(
|
||||
pub async fn populate_physmap(
|
||||
&self,
|
||||
domid: u32,
|
||||
nr_extents: u64,
|
||||
@ -590,7 +623,7 @@ impl XenCall {
|
||||
0,
|
||||
],
|
||||
}];
|
||||
self.multicall(calls)?;
|
||||
self.multicall(calls).await?;
|
||||
let code = calls[0].result;
|
||||
if code > !0xfff {
|
||||
return Err(Error::PopulatePhysmapFailed);
|
||||
@ -602,7 +635,7 @@ impl XenCall {
|
||||
Ok(extents)
|
||||
}
|
||||
|
||||
pub fn claim_pages(&self, domid: u32, pages: u64) -> Result<()> {
|
||||
pub async fn claim_pages(&self, domid: u32, pages: u64) -> Result<()> {
|
||||
trace!(
|
||||
"memory fd={} claim_pages domid={} pages={}",
|
||||
self.handle.as_raw_fd(),
|
||||
@ -620,11 +653,12 @@ impl XenCall {
|
||||
HYPERVISOR_MEMORY_OP,
|
||||
XEN_MEM_CLAIM_PAGES as c_ulong,
|
||||
addr_of_mut!(reservation) as c_ulong,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
|
||||
pub async fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
|
||||
let mut ops = MmuExtOp { cmd, arg1, arg2 };
|
||||
|
||||
self.hypercall4(
|
||||
@ -634,6 +668,7 @@ impl XenCall {
|
||||
0,
|
||||
domid as c_ulong,
|
||||
)
|
||||
.await
|
||||
.map(|_| ())
|
||||
}
|
||||
}
|
||||
|
@@ -9,6 +9,7 @@ edition = "2021"
resolver = "2"

[dependencies]
async-trait = { workspace = true }
elf = { workspace = true }
flate2 = { workspace = true }
libc = { workspace = true }
@@ -16,6 +17,7 @@ log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.3" }
krata-xenstore = { path = "../xenstore", version = "^0.0.3" }
memchr = { workspace = true }
nix = { workspace = true }
slice-copy = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
@@ -24,6 +26,7 @@ xz2 = { workspace = true }

[dev-dependencies]
env_logger = { workspace = true }
tokio = { workspace = true }

[lib]
name = "xenclient"

@@ -13,7 +13,7 @@ async fn main() -> Result<()> {
}
let kernel_image_path = args.get(1).expect("argument not specified");
let initrd_path = args.get(2).expect("argument not specified");
let mut client = XenClient::open(0).await?;
let client = XenClient::open(0).await?;
let config = DomainConfig {
backend_domid: 0,
name: "xenclient-test",

@ -41,9 +41,9 @@ impl Arm64BootSetup {
|
||||
Arm64BootSetup {}
|
||||
}
|
||||
|
||||
fn populate_one_size(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
async fn populate_one_size(
|
||||
&self,
|
||||
setup: &mut BootSetup<'_>,
|
||||
pfn_shift: u64,
|
||||
base_pfn: u64,
|
||||
pfn_count: u64,
|
||||
@ -78,20 +78,23 @@ impl Arm64BootSetup {
|
||||
extents[i as usize] = base_pfn + (i << pfn_shift);
|
||||
}
|
||||
|
||||
let result_extents = setup.call.populate_physmap(
|
||||
setup.domid,
|
||||
count,
|
||||
pfn_shift as u32,
|
||||
0,
|
||||
&extents[0usize..count as usize],
|
||||
)?;
|
||||
let result_extents = setup
|
||||
.call
|
||||
.populate_physmap(
|
||||
setup.domid,
|
||||
count,
|
||||
pfn_shift as u32,
|
||||
0,
|
||||
&extents[0usize..count as usize],
|
||||
)
|
||||
.await?;
|
||||
slice_copy::copy(extents, &result_extents);
|
||||
Ok((result_extents.len() as u64) << pfn_shift)
|
||||
}
|
||||
|
||||
fn populate_guest_memory(
|
||||
async fn populate_guest_memory(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
setup: &mut BootSetup<'_>,
|
||||
base_pfn: u64,
|
||||
pfn_count: u64,
|
||||
) -> Result<()> {
|
||||
@ -99,43 +102,51 @@ impl Arm64BootSetup {
|
||||
|
||||
for pfn in 0..extents.len() {
|
||||
let mut allocsz = (1024 * 1024).min(pfn_count - pfn as u64);
|
||||
allocsz = self.populate_one_size(
|
||||
setup,
|
||||
PFN_512G_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)?;
|
||||
allocsz = self
|
||||
.populate_one_size(
|
||||
setup,
|
||||
PFN_512G_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)
|
||||
.await?;
|
||||
if allocsz > 0 {
|
||||
continue;
|
||||
}
|
||||
allocsz = self.populate_one_size(
|
||||
setup,
|
||||
PFN_1G_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)?;
|
||||
allocsz = self
|
||||
.populate_one_size(
|
||||
setup,
|
||||
PFN_1G_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)
|
||||
.await?;
|
||||
if allocsz > 0 {
|
||||
continue;
|
||||
}
|
||||
allocsz = self.populate_one_size(
|
||||
setup,
|
||||
PFN_2M_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)?;
|
||||
allocsz = self
|
||||
.populate_one_size(
|
||||
setup,
|
||||
PFN_2M_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)
|
||||
.await?;
|
||||
if allocsz > 0 {
|
||||
continue;
|
||||
}
|
||||
allocsz = self.populate_one_size(
|
||||
setup,
|
||||
PFN_4K_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)?;
|
||||
allocsz = self
|
||||
.populate_one_size(
|
||||
setup,
|
||||
PFN_4K_SHIFT,
|
||||
base_pfn + pfn as u64,
|
||||
allocsz,
|
||||
&mut extents,
|
||||
)
|
||||
.await?;
|
||||
if allocsz == 0 {
|
||||
return Err(Error::MemorySetupFailed("allocsz is zero"));
|
||||
}
|
||||
@ -145,6 +156,7 @@ impl Arm64BootSetup {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl ArchBootSetup for Arm64BootSetup {
|
||||
fn page_size(&mut self) -> u64 {
|
||||
ARM_PAGE_SIZE
|
||||
@ -158,15 +170,15 @@ impl ArchBootSetup for Arm64BootSetup {
|
||||
true
|
||||
}
|
||||
|
||||
fn setup_shared_info(&mut self, _: &mut BootSetup, _: u64) -> Result<()> {
|
||||
async fn setup_shared_info(&mut self, _: &mut BootSetup, _: u64) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn setup_start_info(&mut self, _: &mut BootSetup, _: &BootState, _: &str) -> Result<()> {
|
||||
async fn setup_start_info(&mut self, _: &mut BootSetup, _: &BootState, _: &str) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn meminit(
|
||||
async fn meminit(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
total_pages: u64,
|
||||
@ -176,7 +188,7 @@ impl ArchBootSetup for Arm64BootSetup {
|
||||
let kernel_segment = kernel_segment
|
||||
.as_ref()
|
||||
.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
|
||||
setup.call.claim_pages(setup.domid, total_pages)?;
|
||||
setup.call.claim_pages(setup.domid, total_pages).await?;
|
||||
let mut ramsize = total_pages << XEN_PAGE_SHIFT;
|
||||
|
||||
let bankbase = GUEST_RAM_BANK_BASES;
|
||||
@ -214,7 +226,8 @@ impl ArchBootSetup for Arm64BootSetup {
|
||||
}
|
||||
|
||||
for i in 0..2 {
|
||||
self.populate_guest_memory(setup, bankbase[i] >> XEN_PAGE_SHIFT, rambank_size[i])?;
|
||||
self.populate_guest_memory(setup, bankbase[i] >> XEN_PAGE_SHIFT, rambank_size[i])
|
||||
.await?;
|
||||
}
|
||||
|
||||
let bank0end = bankbase[0] + (rambank_size[0] << XEN_PAGE_SHIFT);
|
||||
@ -227,15 +240,15 @@ impl ArchBootSetup for Arm64BootSetup {
|
||||
} else {
|
||||
return Err(Error::MemorySetupFailed("unable to determine modbase"));
|
||||
};
|
||||
setup.call.claim_pages(setup.domid, 0)?;
|
||||
setup.call.claim_pages(setup.domid, 0).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn bootlate(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
|
||||
async fn bootlate(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
|
||||
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
|
||||
let mut vcpu = VcpuGuestContext::default();
|
||||
vcpu.user_regs.pc = state.image_info.virt_entry;
|
||||
vcpu.user_regs.x0 = 0xffffffff;
|
||||
@ -249,11 +262,11 @@ impl ArchBootSetup for Arm64BootSetup {
|
||||
vcpu.user_regs.cpsr = PSR_GUEST64_INIT;
|
||||
vcpu.flags = 1 << 0; // VGCF_ONLINE
|
||||
trace!("vcpu context: {:?}", vcpu);
|
||||
setup.call.set_vcpu_context(setup.domid, 0, &vcpu)?;
|
||||
setup.call.set_vcpu_context(setup.domid, 0, &vcpu).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn alloc_p2m_segment(
|
||||
async fn alloc_p2m_segment(
|
||||
&mut self,
|
||||
_: &mut BootSetup,
|
||||
_: &BootImageInfo,
|
||||
@ -261,7 +274,7 @@ impl ArchBootSetup for Arm64BootSetup {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn alloc_page_tables(
|
||||
async fn alloc_page_tables(
|
||||
&mut self,
|
||||
_: &mut BootSetup,
|
||||
_: &BootImageInfo,
|
||||
@ -269,7 +282,7 @@ impl ArchBootSetup for Arm64BootSetup {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn setup_page_tables(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
|
||||
async fn setup_page_tables(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ use crate::sys::{GrantEntry, XEN_PAGE_SHIFT};
|
||||
use crate::Error;
|
||||
use libc::munmap;
|
||||
use log::debug;
|
||||
use nix::errno::Errno;
|
||||
use slice_copy::copy;
|
||||
|
||||
use crate::mem::ARCH_PAGE_SHIFT;
|
||||
@ -83,33 +84,34 @@ impl BootSetup<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize_memory(
|
||||
async fn initialize_memory(
|
||||
&mut self,
|
||||
arch: &mut dyn ArchBootSetup,
|
||||
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
|
||||
total_pages: u64,
|
||||
kernel_segment: &Option<DomainSegment>,
|
||||
initrd_segment: &Option<DomainSegment>,
|
||||
) -> Result<()> {
|
||||
self.call.set_address_size(self.domid, 64)?;
|
||||
arch.meminit(self, total_pages, kernel_segment, initrd_segment)?;
|
||||
self.call.set_address_size(self.domid, 64).await?;
|
||||
arch.meminit(self, total_pages, kernel_segment, initrd_segment)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn setup_hypercall_page(&mut self, image_info: &BootImageInfo) -> Result<()> {
|
||||
async fn setup_hypercall_page(&mut self, image_info: &BootImageInfo) -> Result<()> {
|
||||
if image_info.virt_hypercall == XEN_UNSET_ADDR {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let pfn = (image_info.virt_hypercall - image_info.virt_base) >> ARCH_PAGE_SHIFT;
|
||||
let mfn = self.phys.p2m[pfn as usize];
|
||||
self.call.hypercall_init(self.domid, mfn)?;
|
||||
self.call.hypercall_init(self.domid, mfn).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn initialize(
|
||||
pub async fn initialize<I: BootImageLoader + Send + Sync>(
|
||||
&mut self,
|
||||
arch: &mut dyn ArchBootSetup,
|
||||
image_loader: &dyn BootImageLoader,
|
||||
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
|
||||
image_loader: &I,
|
||||
initrd: &[u8],
|
||||
max_vcpus: u32,
|
||||
mem_mb: u64,
|
||||
@ -117,60 +119,68 @@ impl BootSetup<'_> {
|
||||
) -> Result<BootState> {
|
||||
debug!("initialize max_vcpus={:?} mem_mb={:?}", max_vcpus, mem_mb);
|
||||
|
||||
let page_size = arch.page_size();
|
||||
let image_info = image_loader.parse()?;
|
||||
debug!("initialize image_info={:?}", image_info);
|
||||
let mut kernel_segment: Option<DomainSegment> = None;
|
||||
let mut initrd_segment: Option<DomainSegment> = None;
|
||||
if !image_info.unmapped_initrd {
|
||||
initrd_segment = Some(self.alloc_module(arch, initrd)?);
|
||||
initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
|
||||
}
|
||||
|
||||
if arch.needs_early_kernel() {
|
||||
kernel_segment = Some(self.load_kernel_segment(arch, image_loader, &image_info)?);
|
||||
kernel_segment = Some(
|
||||
self.load_kernel_segment(page_size, image_loader, &image_info)
|
||||
.await?,
|
||||
);
|
||||
}
|
||||
|
||||
let total_pages = mem_mb << (20 - arch.page_shift());
|
||||
self.initialize_memory(arch, total_pages, &kernel_segment, &initrd_segment)?;
|
||||
self.initialize_memory(arch, total_pages, &kernel_segment, &initrd_segment)
|
||||
.await?;
|
||||
self.virt_alloc_end = image_info.virt_base;
|
||||
|
||||
if kernel_segment.is_none() {
|
||||
kernel_segment = Some(self.load_kernel_segment(arch, image_loader, &image_info)?);
|
||||
kernel_segment = Some(
|
||||
self.load_kernel_segment(page_size, image_loader, &image_info)
|
||||
.await?,
|
||||
);
|
||||
}
|
||||
|
||||
let mut p2m_segment: Option<DomainSegment> = None;
|
||||
if image_info.virt_p2m_base >= image_info.virt_base
|
||||
|| (image_info.virt_p2m_base & ((1 << arch.page_shift()) - 1)) != 0
|
||||
{
|
||||
p2m_segment = arch.alloc_p2m_segment(self, &image_info)?;
|
||||
p2m_segment = arch.alloc_p2m_segment(self, &image_info).await?;
|
||||
}
|
||||
let start_info_segment = self.alloc_page(arch)?;
|
||||
let xenstore_segment = self.alloc_page(arch)?;
|
||||
let start_info_segment = self.alloc_page(page_size)?;
|
||||
let xenstore_segment = self.alloc_page(page_size)?;
|
||||
let mut consoles: Vec<(u32, DomainSegment)> = Vec::new();
|
||||
for _ in 0..console_count {
|
||||
let evtchn = self.call.evtchn_alloc_unbound(self.domid, 0)?;
|
||||
let page = self.alloc_page(arch)?;
|
||||
let evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;
|
||||
let page = self.alloc_page(page_size)?;
|
||||
consoles.push((evtchn, page));
|
||||
}
|
||||
let page_table_segment = arch.alloc_page_tables(self, &image_info)?;
|
||||
let boot_stack_segment = self.alloc_page(arch)?;
|
||||
let page_table_segment = arch.alloc_page_tables(self, &image_info).await?;
|
||||
let boot_stack_segment = self.alloc_page(page_size)?;
|
||||
|
||||
if self.virt_pgtab_end > 0 {
|
||||
self.alloc_padding_pages(arch, self.virt_pgtab_end)?;
|
||||
self.alloc_padding_pages(page_size, self.virt_pgtab_end)?;
|
||||
}
|
||||
|
||||
if p2m_segment.is_none() {
|
||||
if let Some(mut segment) = arch.alloc_p2m_segment(self, &image_info)? {
|
||||
if let Some(mut segment) = arch.alloc_p2m_segment(self, &image_info).await? {
|
||||
segment.vstart = image_info.virt_p2m_base;
|
||||
p2m_segment = Some(segment);
|
||||
}
|
||||
}
|
||||
|
||||
if image_info.unmapped_initrd {
|
||||
initrd_segment = Some(self.alloc_module(arch, initrd)?);
|
||||
initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
|
||||
}
|
||||
|
||||
let initrd_segment = initrd_segment.unwrap();
|
||||
let store_evtchn = self.call.evtchn_alloc_unbound(self.domid, 0)?;
|
||||
let store_evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;
|
||||
|
||||
let kernel_segment =
|
||||
kernel_segment.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
|
||||
@ -192,35 +202,37 @@ impl BootSetup<'_> {
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
pub fn boot(
|
||||
pub async fn boot(
|
||||
&mut self,
|
||||
arch: &mut dyn ArchBootSetup,
|
||||
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
|
||||
state: &mut BootState,
|
||||
cmdline: &str,
|
||||
) -> Result<()> {
|
||||
let domain_info = self.call.get_domain_info(self.domid)?;
|
||||
let domain_info = self.call.get_domain_info(self.domid).await?;
|
||||
let shared_info_frame = domain_info.shared_info_frame;
|
||||
state.shared_info_frame = shared_info_frame;
|
||||
arch.setup_page_tables(self, state)?;
|
||||
arch.setup_start_info(self, state, cmdline)?;
|
||||
self.setup_hypercall_page(&state.image_info)?;
|
||||
arch.bootlate(self, state)?;
|
||||
arch.setup_shared_info(self, state.shared_info_frame)?;
|
||||
arch.vcpu(self, state)?;
|
||||
arch.setup_page_tables(self, state).await?;
|
||||
arch.setup_start_info(self, state, cmdline).await?;
|
||||
self.setup_hypercall_page(&state.image_info).await?;
|
||||
arch.bootlate(self, state).await?;
|
||||
arch.setup_shared_info(self, state.shared_info_frame)
|
||||
.await?;
|
||||
arch.vcpu(self, state).await?;
|
||||
self.phys.unmap_all()?;
|
||||
self.gnttab_seed(state)?;
|
||||
self.gnttab_seed(state).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn gnttab_seed(&mut self, state: &mut BootState) -> Result<()> {
|
||||
async fn gnttab_seed(&mut self, state: &mut BootState) -> Result<()> {
|
||||
let console_gfn =
|
||||
self.phys.p2m[state.consoles.first().map(|x| x.1.pfn).unwrap_or(0) as usize];
|
||||
let xenstore_gfn = self.phys.p2m[state.xenstore_segment.pfn as usize];
|
||||
let addr = self
|
||||
.call
|
||||
.mmap(0, 1 << XEN_PAGE_SHIFT)
|
||||
.await
|
||||
.ok_or(Error::MmapFailed)?;
|
||||
self.call.map_resource(self.domid, 1, 0, 0, 1, addr)?;
|
||||
self.call.map_resource(self.domid, 1, 0, 0, 1, addr).await?;
|
||||
let entries = unsafe { slice::from_raw_parts_mut(addr as *mut GrantEntry, 2) };
|
||||
entries[0].flags = 1 << 0;
|
||||
entries[0].domid = 0;
|
||||
@ -231,23 +243,25 @@ impl BootSetup<'_> {
|
||||
unsafe {
|
||||
let result = munmap(addr as *mut c_void, 1 << XEN_PAGE_SHIFT);
|
||||
if result != 0 {
|
||||
return Err(Error::UnmapFailed);
|
||||
return Err(Error::UnmapFailed(Errno::from_raw(result)));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn load_kernel_segment(
|
||||
async fn load_kernel_segment<I: BootImageLoader + Send + Sync>(
|
||||
&mut self,
|
||||
arch: &mut dyn ArchBootSetup,
|
||||
image_loader: &dyn BootImageLoader,
|
||||
page_size: u64,
|
||||
image_loader: &I,
|
||||
image_info: &BootImageInfo,
|
||||
) -> Result<DomainSegment> {
|
||||
let kernel_segment = self.alloc_segment(
|
||||
arch,
|
||||
image_info.virt_kstart,
|
||||
image_info.virt_kend - image_info.virt_kstart,
|
||||
)?;
|
||||
let kernel_segment = self
|
||||
.alloc_segment(
|
||||
page_size,
|
||||
image_info.virt_kstart,
|
||||
image_info.virt_kend - image_info.virt_kstart,
|
||||
)
|
||||
.await?;
|
||||
let kernel_segment_ptr = kernel_segment.addr as *mut u8;
|
||||
let kernel_segment_slice =
|
||||
unsafe { slice::from_raw_parts_mut(kernel_segment_ptr, kernel_segment.size as usize) };
|
||||
@ -264,18 +278,19 @@ impl BootSetup<'_> {
|
||||
(1 << bits) - 1
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_segment(
|
||||
pub(crate) async fn alloc_segment(
|
||||
&mut self,
|
||||
arch: &mut dyn ArchBootSetup,
|
||||
page_size: u64,
|
||||
start: u64,
|
||||
size: u64,
|
||||
) -> Result<DomainSegment> {
|
||||
debug!("alloc_segment {:#x} {:#x}", start, size);
|
||||
if start > 0 {
|
||||
self.alloc_padding_pages(arch, start)?;
|
||||
self.alloc_padding_pages(page_size, start)?;
|
||||
}
|
||||
|
||||
let page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
|
||||
let pages = (size + page_size as u64 - 1) / page_size as u64;
|
||||
let local_page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
|
||||
let pages = (size + local_page_size as u64 - 1) / local_page_size as u64;
|
||||
let start = self.virt_alloc_end;
|
||||
|
||||
let mut segment = DomainSegment {
|
||||
@ -288,12 +303,12 @@ impl BootSetup<'_> {
|
||||
pages,
|
||||
};
|
||||
|
||||
self.chk_alloc_pages(arch, pages)?;
|
||||
self.chk_alloc_pages(page_size, pages)?;
|
||||
|
||||
let ptr = self.phys.pfn_to_ptr(segment.pfn, pages)?;
|
||||
let ptr = self.phys.pfn_to_ptr(segment.pfn, pages).await?;
|
||||
segment.addr = ptr;
|
||||
let slice = unsafe {
|
||||
slice::from_raw_parts_mut(ptr as *mut u8, (pages * page_size as u64) as usize)
|
||||
slice::from_raw_parts_mut(ptr as *mut u8, (pages * local_page_size as u64) as usize)
|
||||
};
|
||||
slice.fill(0);
|
||||
segment.vend = self.virt_alloc_end;
|
||||
@ -304,15 +319,15 @@ impl BootSetup<'_> {
|
||||
Ok(segment)
|
||||
}
|
||||
|
||||
fn alloc_page(&mut self, arch: &mut dyn ArchBootSetup) -> Result<DomainSegment> {
|
||||
fn alloc_page(&mut self, page_size: u64) -> Result<DomainSegment> {
|
||||
let start = self.virt_alloc_end;
|
||||
let pfn = self.pfn_alloc_end;
|
||||
|
||||
self.chk_alloc_pages(arch, 1)?;
|
||||
self.chk_alloc_pages(page_size, 1)?;
|
||||
debug!("alloc_page {:#x} (pfn {:#x})", start, pfn);
|
||||
Ok(DomainSegment {
|
||||
vstart: start,
|
||||
vend: (start + arch.page_size()) - 1,
|
||||
vend: (start + page_size) - 1,
|
||||
pfn,
|
||||
addr: 0,
|
||||
size: 0,
|
||||
@ -321,31 +336,29 @@ impl BootSetup<'_> {
|
||||
})
|
||||
}
|
||||
|
||||
fn alloc_module(
|
||||
&mut self,
|
||||
arch: &mut dyn ArchBootSetup,
|
||||
buffer: &[u8],
|
||||
) -> Result<DomainSegment> {
|
||||
let segment = self.alloc_segment(arch, 0, buffer.len() as u64)?;
|
||||
async fn alloc_module(&mut self, page_size: u64, buffer: &[u8]) -> Result<DomainSegment> {
|
||||
let segment = self
|
||||
.alloc_segment(page_size, 0, buffer.len() as u64)
|
||||
.await?;
|
||||
let slice = unsafe { slice::from_raw_parts_mut(segment.addr as *mut u8, buffer.len()) };
|
||||
copy(slice, buffer);
|
||||
Ok(segment)
|
||||
}
|
||||
|
||||
fn alloc_padding_pages(&mut self, arch: &mut dyn ArchBootSetup, boundary: u64) -> Result<()> {
|
||||
if (boundary & (arch.page_size() - 1)) != 0 {
|
||||
fn alloc_padding_pages(&mut self, page_size: u64, boundary: u64) -> Result<()> {
|
||||
if (boundary & (page_size - 1)) != 0 {
|
||||
return Err(Error::MemorySetupFailed("boundary is incorrect"));
|
||||
}
|
||||
|
||||
if boundary < self.virt_alloc_end {
|
||||
return Err(Error::MemorySetupFailed("boundary is below allocation end"));
|
||||
}
|
||||
let pages = (boundary - self.virt_alloc_end) / arch.page_size();
|
||||
self.chk_alloc_pages(arch, pages)?;
|
||||
let pages = (boundary - self.virt_alloc_end) / page_size;
|
||||
self.chk_alloc_pages(page_size, pages)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn chk_alloc_pages(&mut self, arch: &mut dyn ArchBootSetup, pages: u64) -> Result<()> {
|
||||
fn chk_alloc_pages(&mut self, page_size: u64, pages: u64) -> Result<()> {
|
||||
if pages > self.total_pages
|
||||
|| self.pfn_alloc_end > self.total_pages
|
||||
|| pages > self.total_pages - self.pfn_alloc_end
|
||||
@ -354,47 +367,56 @@ impl BootSetup<'_> {
|
||||
}
|
||||
|
||||
self.pfn_alloc_end += pages;
|
||||
self.virt_alloc_end += pages * arch.page_size();
|
||||
self.virt_alloc_end += pages * page_size;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait ArchBootSetup {
|
||||
fn page_size(&mut self) -> u64;
|
||||
fn page_shift(&mut self) -> u64;
|
||||
|
||||
fn needs_early_kernel(&mut self) -> bool;
|
||||
|
||||
fn alloc_p2m_segment(
|
||||
async fn alloc_p2m_segment(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
image_info: &BootImageInfo,
|
||||
) -> Result<Option<DomainSegment>>;
|
||||
|
||||
fn alloc_page_tables(
|
||||
async fn alloc_page_tables(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
image_info: &BootImageInfo,
|
||||
) -> Result<Option<DomainSegment>>;
|
||||
|
||||
fn setup_page_tables(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
|
||||
async fn setup_page_tables(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
state: &mut BootState,
|
||||
) -> Result<()>;
|
||||
|
||||
fn setup_start_info(
|
||||
async fn setup_start_info(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
state: &BootState,
|
||||
cmdline: &str,
|
||||
) -> Result<()>;
|
||||
|
||||
fn setup_shared_info(&mut self, setup: &mut BootSetup, shared_info_frame: u64) -> Result<()>;
|
||||
async fn setup_shared_info(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
shared_info_frame: u64,
|
||||
) -> Result<()>;
|
||||
|
||||
fn meminit(
|
||||
async fn meminit(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
total_pages: u64,
|
||||
kernel_segment: &Option<DomainSegment>,
|
||||
initrd_segment: &Option<DomainSegment>,
|
||||
) -> Result<()>;
|
||||
fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
|
||||
fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
|
||||
async fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
|
||||
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
|
||||
}
|
||||
|
@@ -22,8 +22,8 @@ pub enum Error {
ElfParseFailed(#[from] elf::ParseError),
#[error("mmap failed")]
MmapFailed,
#[error("munmap failed")]
UnmapFailed,
#[error("munmap failed: {0}")]
UnmapFailed(nix::errno::Errno),
#[error("memory setup failed: {0}")]
MemorySetupFailed(&'static str),
#[error("populate physmap failed: wanted={0}, received={1}, input_extents={2}")]

@ -16,7 +16,7 @@ pub mod arm64;
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
use crate::arm64::Arm64BootSetup;
|
||||
|
||||
use crate::boot::BootSetup;
|
||||
use crate::boot::{ArchBootSetup, BootSetup};
|
||||
use crate::elfloader::ElfImageLoader;
|
||||
use crate::error::{Error, Result};
|
||||
use boot::BootState;
|
||||
@ -34,6 +34,7 @@ use xenstore::{
|
||||
XsPermission, XsdClient, XsdInterface, XS_PERM_NONE, XS_PERM_READ, XS_PERM_READ_WRITE,
|
||||
};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct XenClient {
|
||||
pub store: XsdClient,
|
||||
call: XenCall,
|
||||
@ -115,7 +116,7 @@ impl XenClient {
|
||||
Ok(XenClient { store, call })
|
||||
}
|
||||
|
||||
pub async fn create(&mut self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
|
||||
pub async fn create(&self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
|
||||
let mut domain = CreateDomain {
|
||||
max_vcpus: config.max_vcpus,
|
||||
..Default::default()
|
||||
@ -125,7 +126,7 @@ impl XenClient {
|
||||
domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP;
|
||||
}
|
||||
|
||||
let domid = self.call.create_domain(domain)?;
|
||||
let domid = self.call.create_domain(domain).await?;
|
||||
match self.init(domid, &domain, config).await {
|
||||
Ok(created) => Ok(created),
|
||||
Err(err) => {
|
||||
@ -138,7 +139,7 @@ impl XenClient {
|
||||
}
|
||||
|
||||
async fn init(
|
||||
&mut self,
|
||||
&self,
|
||||
domid: u32,
|
||||
domain: &CreateDomain,
|
||||
config: &DomainConfig<'_>,
|
||||
@ -253,8 +254,8 @@ impl XenClient {
|
||||
tx.commit().await?;
|
||||
}
|
||||
|
||||
self.call.set_max_vcpus(domid, config.max_vcpus)?;
|
||||
self.call.set_max_mem(domid, config.mem_mb * 1024)?;
|
||||
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
|
||||
self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
|
||||
let image_loader = ElfImageLoader::load_file_kernel(config.kernel_path)?;
|
||||
|
||||
let xenstore_evtchn: u32;
|
||||
@ -265,19 +266,21 @@ impl XenClient {
|
||||
{
|
||||
let mut boot = BootSetup::new(&self.call, domid);
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
let mut arch = X86BootSetup::new();
|
||||
let mut arch = Box::new(X86BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
let mut arch = Arm64BootSetup::new();
|
||||
let mut arch = Box::new(Arm64BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
|
||||
let initrd = read(config.initrd_path)?;
|
||||
state = boot.initialize(
|
||||
&mut arch,
|
||||
&image_loader,
|
||||
initrd.as_slice(),
|
||||
config.max_vcpus,
|
||||
config.mem_mb,
|
||||
1,
|
||||
)?;
|
||||
boot.boot(&mut arch, &mut state, config.cmdline)?;
|
||||
state = boot
|
||||
.initialize(
|
||||
&mut arch,
|
||||
&image_loader,
|
||||
initrd.as_slice(),
|
||||
config.max_vcpus,
|
||||
config.mem_mb,
|
||||
1,
|
||||
)
|
||||
.await?;
|
||||
boot.boot(&mut arch, &mut state, config.cmdline).await?;
|
||||
xenstore_evtchn = state.store_evtchn;
|
||||
xenstore_mfn = boot.phys.p2m[state.xenstore_segment.pfn as usize];
|
||||
p2m = boot.phys.p2m;
|
||||
@ -418,7 +421,8 @@ impl XenClient {
|
||||
for channel in &config.event_channels {
|
||||
let id = self
|
||||
.call
|
||||
.evtchn_alloc_unbound(domid, config.backend_domid)?;
|
||||
.evtchn_alloc_unbound(domid, config.backend_domid)
|
||||
.await?;
|
||||
let channel_path = format!("{}/evtchn/{}", dom_path, channel.name);
|
||||
self.store
|
||||
.write_string(&format!("{}/name", channel_path), channel.name)
|
||||
@ -428,12 +432,12 @@ impl XenClient {
|
||||
.await?;
|
||||
}
|
||||
|
||||
self.call.unpause_domain(domid)?;
|
||||
self.call.unpause_domain(domid).await?;
|
||||
Ok(CreatedDomain { domid, channels })
|
||||
}
|
||||
|
||||
async fn disk_device_add(
|
||||
&mut self,
|
||||
&self,
|
||||
dom_path: &str,
|
||||
backend_dom_path: &str,
|
||||
backend_domid: u32,
|
||||
@ -486,7 +490,7 @@ impl XenClient {
|
||||
|
||||
#[allow(clippy::too_many_arguments, clippy::unnecessary_unwrap)]
|
||||
async fn console_device_add(
|
||||
&mut self,
|
||||
&self,
|
||||
channel: &DomainChannel,
|
||||
p2m: &[u64],
|
||||
state: &BootState,
|
||||
@ -553,7 +557,7 @@ impl XenClient {
|
||||
}
|
||||
|
||||
async fn fs_9p_device_add(
|
||||
&mut self,
|
||||
&self,
|
||||
dom_path: &str,
|
||||
backend_dom_path: &str,
|
||||
backend_domid: u32,
|
||||
@ -591,7 +595,7 @@ impl XenClient {
|
||||
}
|
||||
|
||||
async fn vif_device_add(
|
||||
&mut self,
|
||||
&self,
|
||||
dom_path: &str,
|
||||
backend_dom_path: &str,
|
||||
backend_domid: u32,
|
||||
@ -650,7 +654,7 @@ impl XenClient {
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn device_add(
|
||||
&mut self,
|
||||
&self,
|
||||
typ: &str,
|
||||
id: u64,
|
||||
dom_path: &str,
|
||||
@ -713,15 +717,15 @@ impl XenClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn destroy(&mut self, domid: u32) -> Result<()> {
|
||||
pub async fn destroy(&self, domid: u32) -> Result<()> {
|
||||
if let Err(err) = self.destroy_store(domid).await {
|
||||
warn!("failed to destroy store for domain {}: {}", domid, err);
|
||||
}
|
||||
self.call.destroy_domain(domid)?;
|
||||
self.call.destroy_domain(domid).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn destroy_store(&mut self, domid: u32) -> Result<()> {
|
||||
async fn destroy_store(&self, domid: u32) -> Result<()> {
|
||||
let dom_path = self.store.get_domain_path(domid).await?;
|
||||
let vm_path = self.store.read_string(&format!("{}/vm", dom_path)).await?;
|
||||
if vm_path.is_none() {
|
||||
@ -813,7 +817,7 @@ impl XenClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_console_path(&mut self, domid: u32) -> Result<String> {
|
||||
pub async fn get_console_path(&self, domid: u32) -> Result<String> {
|
||||
let dom_path = self.store.get_domain_path(domid).await?;
|
||||
let console_tty_path = format!("{}/console/tty", dom_path);
|
||||
let mut tty: Option<String> = None;
|
||||
|
@ -3,6 +3,7 @@ use crate::sys::{XEN_PAGE_SHIFT, XEN_PAGE_SIZE};
|
||||
use crate::Error;
|
||||
use libc::munmap;
|
||||
use log::debug;
|
||||
use nix::errno::Errno;
|
||||
use std::ffi::c_void;
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
@ -45,7 +46,7 @@ impl PhysicalPages<'_> {
|
||||
self.p2m.len() as u64
|
||||
}
|
||||
|
||||
pub fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
|
||||
pub async fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
|
||||
for page in &self.pages {
|
||||
if pfn >= page.pfn + page.count {
|
||||
continue;
|
||||
@ -76,10 +77,10 @@ impl PhysicalPages<'_> {
|
||||
return Err(Error::MemorySetupFailed("page count is zero"));
|
||||
}
|
||||
|
||||
self.pfn_alloc(pfn, count)
|
||||
self.pfn_alloc(pfn, count).await
|
||||
}
|
||||
|
||||
fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
|
||||
async fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
|
||||
let mut entries = vec![MmapEntry::default(); count as usize];
|
||||
for (i, entry) in entries.iter_mut().enumerate() {
|
||||
entry.mfn = self.p2m[pfn as usize + i];
|
||||
@ -98,9 +99,13 @@ impl PhysicalPages<'_> {
|
||||
let addr = self
|
||||
.call
|
||||
.mmap(0, actual_mmap_len)
|
||||
.await
|
||||
.ok_or(Error::MmapFailed)?;
|
||||
debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
|
||||
let result = self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
|
||||
let result = self
|
||||
.call
|
||||
.mmap_batch(self.domid, num as u64, addr, pfns)
|
||||
.await?;
|
||||
if result != 0 {
|
||||
return Err(Error::MmapFailed);
|
||||
}
|
||||
@ -117,7 +122,7 @@ impl PhysicalPages<'_> {
|
||||
Ok(addr)
|
||||
}
|
||||
|
||||
pub fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
|
||||
pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
|
||||
let num = ((size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT) as usize;
|
||||
let mut pfns = vec![u64::MAX; num];
|
||||
for (i, item) in pfns.iter_mut().enumerate().take(num) {
|
||||
@ -128,9 +133,13 @@ impl PhysicalPages<'_> {
|
||||
let addr = self
|
||||
.call
|
||||
.mmap(0, actual_mmap_len)
|
||||
.await
|
||||
.ok_or(Error::MmapFailed)?;
|
||||
debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
|
||||
let result = self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
|
||||
let result = self
|
||||
.call
|
||||
.mmap_batch(self.domid, num as u64, addr, pfns)
|
||||
.await?;
|
||||
if result != 0 {
|
||||
return Err(Error::MmapFailed);
|
||||
}
|
||||
@ -155,7 +164,7 @@ impl PhysicalPages<'_> {
|
||||
(page.count << ARCH_PAGE_SHIFT) as usize,
|
||||
);
|
||||
if err != 0 {
|
||||
return Err(Error::UnmapFailed);
|
||||
return Err(Error::UnmapFailed(Errno::from_raw(err)));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -181,7 +190,7 @@ impl PhysicalPages<'_> {
|
||||
page.ptr
|
||||
);
|
||||
if err != 0 {
|
||||
return Err(Error::UnmapFailed);
|
||||
return Err(Error::UnmapFailed(Errno::from_raw(err)));
|
||||
}
|
||||
self.pages.remove(i);
|
||||
}
|
||||
|
@ -275,6 +275,7 @@ impl X86BootSetup {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl ArchBootSetup for X86BootSetup {
|
||||
fn page_size(&mut self) -> u64 {
|
||||
X86_PAGE_SIZE
|
||||
@ -288,7 +289,7 @@ impl ArchBootSetup for X86BootSetup {
|
||||
false
|
||||
}
|
||||
|
||||
fn alloc_p2m_segment(
|
||||
async fn alloc_p2m_segment(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
image_info: &BootImageInfo,
|
||||
@ -310,11 +311,13 @@ impl ArchBootSetup for X86BootSetup {
|
||||
}
|
||||
self.table.mappings_count += 1;
|
||||
p2m_alloc_size += (pgtables << X86_PAGE_SHIFT) as u64;
|
||||
let p2m_segment = setup.alloc_segment(self, 0, p2m_alloc_size)?;
|
||||
let p2m_segment = setup
|
||||
.alloc_segment(self.page_size(), 0, p2m_alloc_size)
|
||||
.await?;
|
||||
Ok(Some(p2m_segment))
|
||||
}
|
||||
|
||||
fn alloc_page_tables(
|
||||
async fn alloc_page_tables(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
image_info: &BootImageInfo,
|
||||
@ -341,7 +344,7 @@ impl ArchBootSetup for X86BootSetup {
|
||||
self.table.mappings_count += 1;
|
||||
setup.virt_pgtab_end = try_virt_end + 1;
|
||||
let size = self.table.mappings[m].area.pgtables as u64 * X86_PAGE_SIZE;
|
||||
let segment = setup.alloc_segment(self, 0, size)?;
|
||||
let segment = setup.alloc_segment(self.page_size(), 0, size).await?;
|
||||
debug!(
|
||||
"alloc_page_tables table={:?} segment={:?}",
|
||||
self.table, segment
|
||||
@ -349,7 +352,11 @@ impl ArchBootSetup for X86BootSetup {
|
||||
Ok(Some(segment))
|
||||
}
|
||||
|
||||
fn setup_page_tables(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
|
||||
async fn setup_page_tables(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
state: &mut BootState,
|
||||
) -> Result<()> {
|
||||
let p2m_segment = state
|
||||
.p2m_segment
|
||||
.as_ref()
|
||||
@ -364,7 +371,7 @@ impl ArchBootSetup for X86BootSetup {
|
||||
let map1 = &self.table.mappings[m1];
|
||||
let from = map1.levels[l].from;
|
||||
let to = map1.levels[l].to;
|
||||
let pg_ptr = setup.phys.pfn_to_ptr(map1.levels[l].pfn, 0)? as *mut u64;
|
||||
let pg_ptr = setup.phys.pfn_to_ptr(map1.levels[l].pfn, 0).await? as *mut u64;
|
||||
for m2 in 0usize..self.table.mappings_count {
|
||||
let map2 = &self.table.mappings[m2];
|
||||
let lvl = if l > 0 {
|
||||
@ -407,13 +414,16 @@ impl ArchBootSetup for X86BootSetup {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn setup_start_info(
|
||||
async fn setup_start_info(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
state: &BootState,
|
||||
cmdline: &str,
|
||||
) -> Result<()> {
|
||||
let ptr = setup.phys.pfn_to_ptr(state.start_info_segment.pfn, 1)?;
|
||||
let ptr = setup
|
||||
.phys
|
||||
.pfn_to_ptr(state.start_info_segment.pfn, 1)
|
||||
.await?;
|
||||
let byte_slice =
|
||||
unsafe { slice::from_raw_parts_mut(ptr as *mut u8, X86_PAGE_SIZE as usize) };
|
||||
byte_slice.fill(0);
|
||||
@ -456,11 +466,15 @@ impl ArchBootSetup for X86BootSetup {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn setup_shared_info(&mut self, setup: &mut BootSetup, shared_info_frame: u64) -> Result<()> {
|
||||
async fn setup_shared_info(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
shared_info_frame: u64,
|
||||
) -> Result<()> {
|
||||
let info = setup
|
||||
.phys
|
||||
.map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)?
|
||||
as *mut SharedInfo;
|
||||
.map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)
|
||||
.await? as *mut SharedInfo;
|
||||
unsafe {
|
||||
let size = size_of::<SharedInfo>();
|
||||
let info_as_buff = slice::from_raw_parts_mut(info as *mut u8, size);
|
||||
@ -473,14 +487,14 @@ impl ArchBootSetup for X86BootSetup {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn meminit(
|
||||
async fn meminit(
|
||||
&mut self,
|
||||
setup: &mut BootSetup,
|
||||
total_pages: u64,
|
||||
_: &Option<DomainSegment>,
|
||||
_: &Option<DomainSegment>,
|
||||
) -> Result<()> {
|
||||
setup.call.claim_pages(setup.domid, total_pages)?;
|
||||
setup.call.claim_pages(setup.domid, total_pages).await?;
|
||||
let mut vmemranges: Vec<VmemRange> = Vec::new();
|
||||
let stub = VmemRange {
|
||||
start: 0,
|
||||
@ -530,13 +544,16 @@ impl ArchBootSetup for X86BootSetup {
|
||||
}
|
||||
|
||||
let extents_init_slice = extents_init.as_slice();
|
||||
let extents = setup.call.populate_physmap(
|
||||
setup.domid,
|
||||
count,
|
||||
SUPERPAGE_2MB_SHIFT as u32,
|
||||
0,
|
||||
&extents_init_slice[0usize..count as usize],
|
||||
)?;
|
||||
let extents = setup
|
||||
.call
|
||||
.populate_physmap(
|
||||
setup.domid,
|
||||
count,
|
||||
SUPERPAGE_2MB_SHIFT as u32,
|
||||
0,
|
||||
&extents_init_slice[0usize..count as usize],
|
||||
)
|
||||
.await?;
|
||||
|
||||
pfn = pfn_base_idx;
|
||||
for mfn in extents {
|
||||
@ -558,10 +575,10 @@ impl ArchBootSetup for X86BootSetup {
|
||||
let p2m_idx = (pfn_base + j) as usize;
|
||||
let p2m_end_idx = p2m_idx + allocsz as usize;
|
||||
let input_extent_starts = &p2m[p2m_idx..p2m_end_idx];
|
||||
let result =
|
||||
setup
|
||||
.call
|
||||
.populate_physmap(setup.domid, allocsz, 0, 0, input_extent_starts)?;
|
||||
let result = setup
|
||||
.call
|
||||
.populate_physmap(setup.domid, allocsz, 0, 0, input_extent_starts)
|
||||
.await?;
|
||||
|
||||
if result.len() != allocsz as usize {
|
||||
return Err(Error::PopulatePhysmapFailed(
|
||||
@ -581,11 +598,11 @@ impl ArchBootSetup for X86BootSetup {
|
||||
}
|
||||
|
||||
setup.phys.load_p2m(p2m);
|
||||
setup.call.claim_pages(setup.domid, 0)?;
|
||||
setup.call.claim_pages(setup.domid, 0).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
|
||||
async fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
|
||||
let p2m_segment = state
|
||||
.p2m_segment
|
||||
.as_ref()
|
||||
@ -600,11 +617,12 @@ impl ArchBootSetup for X86BootSetup {
|
||||
setup.phys.unmap(p2m_segment.pfn)?;
|
||||
setup
|
||||
.call
|
||||
.mmuext(setup.domid, MMUEXT_PIN_L4_TABLE, pg_mfn, 0)?;
|
||||
.mmuext(setup.domid, MMUEXT_PIN_L4_TABLE, pg_mfn, 0)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
|
||||
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
|
||||
let page_table_segment = state
|
||||
.page_table_segment
|
||||
.as_ref()
|
||||
@ -633,7 +651,7 @@ impl ArchBootSetup for X86BootSetup {
|
||||
vcpu.kernel_ss = vcpu.user_regs.ss as u64;
|
||||
vcpu.kernel_sp = vcpu.user_regs.rsp;
|
||||
trace!("vcpu context: {:?}", vcpu);
|
||||
setup.call.set_vcpu_context(setup.domid, 0, &vcpu)?;
|
||||
setup.call.set_vcpu_context(setup.domid, 0, &vcpu).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@@ -168,7 +168,7 @@ impl XsdClient {
})
}

pub async fn get_domain_path(&mut self, domid: u32) -> Result<String> {
pub async fn get_domain_path(&self, domid: u32) -> Result<String> {
let response = self
.socket
.send(0, XSD_GET_DOMAIN_PATH, &[&domid.to_string()])
@@ -176,7 +176,7 @@ impl XsdClient {
response.parse_string()
}

pub async fn introduce_domain(&mut self, domid: u32, mfn: u64, evtchn: u32) -> Result<bool> {
pub async fn introduce_domain(&self, domid: u32, mfn: u64, evtchn: u32) -> Result<bool> {
trace!("introduce domain domid={domid} mfn={mfn} evtchn={evtchn}");
let response = self
.socket