feature(xen): dynamic platform architecture (#194)

* wip hvm

* feat: move all platform stuff into its own thing

* hvm work

* more hvm work

* more hvm work

* feat: rework to support multiple platforms

* hvm nonredist

* more hvm work

* more hvm work

* pvh work

* work on loading cmdline

* implement initrd loading for pvh

* partially working pvh support

* fix merge issues

* pvh works!

* swap over to pv support

* remove old kernel stuff

* fix support for pv

* pvh is gone for now

* fix(runtime): debug should be respected

* fix(xen): arm64 is currently unsupported, treat it as such at runtime

* fix(examples): use architecture cfg for boot example

* fix(x86): use IOMMU only when needed for passthrough

* chore(build): print kernel architecture during fetch
Alex Zenla
2024-06-20 19:42:45 -07:00
committed by GitHub
parent 2c7210d85e
commit e219f3adf1
25 changed files with 1124 additions and 11293 deletions


@@ -3,34 +3,38 @@ pub mod sys;
use crate::error::{Error, Result};
use crate::sys::{
AddressSize, AssignDevice, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext,
EvtChnAllocUnbound, GetDomainInfo, GetPageFrameInfo3, Hypercall, HypercallInit,
IoMemPermission, IoPortPermission, IrqPermission, MaxMem, MaxVcpus, MemoryMap,
MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, PciAssignDevice,
VcpuGuestContext, VcpuGuestContextAny, XenCapabilitiesInfo, DOMCTL_DEV_PCI, HYPERVISOR_DOMCTL,
HYPERVISOR_EVENT_CHANNEL_OP, HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP, HYPERVISOR_MULTICALL,
HYPERVISOR_XEN_VERSION, XENVER_CAPABILITIES, XEN_DOMCTL_ASSIGN_DEVICE, XEN_DOMCTL_CREATEDOMAIN,
XEN_DOMCTL_DESTROYDOMAIN, XEN_DOMCTL_GETDOMAININFO, XEN_DOMCTL_GETPAGEFRAMEINFO3,
XEN_DOMCTL_GETVCPUCONTEXT, XEN_DOMCTL_HYPERCALL_INIT, XEN_DOMCTL_IOMEM_PERMISSION,
XEN_DOMCTL_IOPORT_PERMISSION, XEN_DOMCTL_IRQ_PERMISSION, XEN_DOMCTL_MAX_MEM,
XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN, XEN_DOMCTL_SETVCPUCONTEXT,
XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_UNPAUSEDOMAIN, XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP,
AddToPhysmap, AddressSize, AssignDevice, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext,
EvtChnAllocUnbound, GetDomainInfo, GetPageFrameInfo3, HvmContext, HvmParam, Hypercall,
HypercallInit, IoMemPermission, IoPortPermission, IrqPermission, MaxMem, MaxVcpus, MemoryMap,
MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, PagingMempool,
PciAssignDevice, XenCapabilitiesInfo, DOMCTL_DEV_PCI, HYPERVISOR_DOMCTL,
HYPERVISOR_EVENT_CHANNEL_OP, HYPERVISOR_HVM_OP, HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP,
HYPERVISOR_MULTICALL, HYPERVISOR_XEN_VERSION, XENVER_CAPABILITIES, XEN_DOMCTL_ASSIGN_DEVICE,
XEN_DOMCTL_CREATEDOMAIN, XEN_DOMCTL_DESTROYDOMAIN, XEN_DOMCTL_GETDOMAININFO,
XEN_DOMCTL_GETHVMCONTEXT, XEN_DOMCTL_GETPAGEFRAMEINFO3, XEN_DOMCTL_HYPERCALL_INIT,
XEN_DOMCTL_IOMEM_PERMISSION, XEN_DOMCTL_IOPORT_PERMISSION, XEN_DOMCTL_IRQ_PERMISSION,
XEN_DOMCTL_MAX_MEM, XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN, XEN_DOMCTL_SETHVMCONTEXT,
XEN_DOMCTL_SETVCPUCONTEXT, XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
XEN_DOMCTL_UNPAUSEDOMAIN, XEN_MEM_ADD_TO_PHYSMAP, XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP,
XEN_MEM_POPULATE_PHYSMAP,
};
use libc::{c_int, mmap, usleep, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use libc::{c_int, mmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use log::trace;
use nix::errno::Errno;
use std::ffi::{c_long, c_uint, c_ulong, c_void};
use std::sync::Arc;
use std::time::Duration;
use sys::{
E820Entry, ForeignMemoryMap, PhysdevMapPirq, HYPERVISOR_PHYSDEV_OP, PHYSDEVOP_MAP_PIRQ,
XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION, XEN_MEM_SET_MEMORY_MAP,
E820Entry, ForeignMemoryMap, PhysdevMapPirq, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP,
PHYSDEVOP_MAP_PIRQ, XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION,
XEN_MEM_SET_MEMORY_MAP,
};
use tokio::sync::Semaphore;
use tokio::time::sleep;
use std::fs::{File, OpenOptions};
use std::os::fd::AsRawFd;
use std::ptr::addr_of_mut;
use std::ptr::{addr_of_mut, null_mut};
use std::slice;
#[derive(Clone)]
@@ -233,8 +237,8 @@ impl XenCall {
num: num as u32,
domid: domid as u16,
addr,
mfns: mfns.as_mut_ptr(),
errors: errors.as_mut_ptr(),
mfns: mfns.as_mut_ptr() as u64,
errors: errors.as_mut_ptr() as u64,
};
let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
@@ -243,7 +247,7 @@ impl XenCall {
return Err(Error::MmapBatchFailed(errno))?;
}
usleep(100);
sleep(Duration::from_micros(100)).await;
let mut i: usize = 0;
let mut paged: usize = 0;
@@ -258,8 +262,8 @@ impl XenCall {
num: 1,
domid: domid as u16,
addr: addr + ((i as u64) << 12),
mfns: mfns.as_mut_ptr().add(i),
errors: errors.as_mut_ptr().add(i),
mfns: mfns.as_mut_ptr().add(i) as u64,
errors: errors.as_mut_ptr().add(i) as u64,
};
loop {
@@ -459,45 +463,19 @@ impl XenCall {
Ok(())
}
pub async fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
trace!(
"domctl fd={} get_vcpu_context domid={}",
self.handle.as_raw_fd(),
domid,
);
let mut wrapper = VcpuGuestContextAny {
value: VcpuGuestContext::default(),
};
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_GETVCPUCONTEXT,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
vcpu_context: DomCtlVcpuContext {
vcpu,
ctx: addr_of_mut!(wrapper) as c_ulong,
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { wrapper.value })
}
pub async fn set_vcpu_context(
&self,
domid: u32,
vcpu: u32,
context: &VcpuGuestContext,
mut context: VcpuGuestContextAny,
) -> Result<()> {
trace!(
"domctl fd={} set_vcpu_context domid={} context={:?}",
self.handle.as_raw_fd(),
domid,
context,
unsafe { context.value }
);
let mut value = VcpuGuestContextAny { value: *context };
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_SETVCPUCONTEXT,
interface_version: self.domctl_interface_version,
@@ -505,7 +483,7 @@ impl XenCall {
value: DomCtlValue {
vcpu_context: DomCtlVcpuContext {
vcpu,
ctx: addr_of_mut!(value) as c_ulong,
ctx: addr_of_mut!(context) as c_ulong,
},
},
};
@@ -597,6 +575,12 @@ impl XenCall {
domid: u32,
entries: Vec<E820Entry>,
) -> Result<Vec<E820Entry>> {
trace!(
"fd={} set_memory_map domid={} entries={:?}",
self.handle.as_raw_fd(),
domid,
entries
);
let mut memory_map = ForeignMemoryMap {
domid: domid as u16,
map: MemoryMap {
@@ -633,24 +617,14 @@ impl XenCall {
domid: domid as u16,
};
let calls = &mut [MultiCallEntry {
op: HYPERVISOR_MEMORY_OP,
result: 0,
args: [
let code = self
.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_POPULATE_PHYSMAP as c_ulong,
addr_of_mut!(reservation) as c_ulong,
0,
0,
0,
0,
],
}];
self.multicall(calls).await?;
let code = calls[0].result;
if code > !0xfff {
return Err(Error::PopulatePhysmapFailed);
}
if code as usize > extent_starts.len() {
)
.await?;
if code as usize != extent_starts.len() {
return Err(Error::PopulatePhysmapFailed);
}
let extents = extent_starts[0..code as usize].to_vec();
@@ -680,6 +654,31 @@ impl XenCall {
Ok(())
}
pub async fn add_to_physmap(&self, domid: u32, space: u32, idx: u64, pfn: u64) -> Result<()> {
trace!(
"memory fd={} add_to_physmap domid={} space={} idx={} pfn={}",
self.handle.as_raw_fd(),
domid,
space,
idx,
pfn,
);
let mut add = AddToPhysmap {
domid: domid as u16,
size: 0,
space,
idx,
gpfn: pfn,
};
self.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_ADD_TO_PHYSMAP as c_ulong,
addr_of_mut!(add) as c_ulong,
)
.await?;
Ok(())
}
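As a hedged usage sketch (not code from this commit): the new add_to_physmap wrapper issues XENMEM_add_to_physmap, so a caller holding a XenCall can place well-known frames into a guest's physmap during domain construction. The XENMAPSPACE_SHARED_INFO constant below is Xen's public ABI value and is declared locally because the diff does not show this crate exporting it.

// Sketch only: map the domain's shared-info frame at a chosen guest pfn.
// XENMAPSPACE_SHARED_INFO = 0 comes from Xen's public memory.h; assumed here.
const XENMAPSPACE_SHARED_INFO: u32 = 0;

async fn place_shared_info(call: &XenCall, domid: u32, pfn: u64) -> Result<()> {
    // idx selects which frame of the space to map; shared info is a single frame.
    call.add_to_physmap(domid, XENMAPSPACE_SHARED_INFO, 0, pfn).await
}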
pub async fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
let mut ops = MmuExtOp { cmd, arg1, arg2 };
@@ -783,6 +782,7 @@ impl XenCall {
Ok(())
}
#[allow(clippy::field_reassign_with_default)]
pub async fn map_pirq(&self, domid: u32, index: isize, pirq: Option<u32>) -> Result<u32> {
trace!(
"physdev fd={} map_pirq domid={} index={} pirq={:?}",
@@ -835,4 +835,86 @@ impl XenCall {
.await?;
Ok(())
}
#[allow(clippy::field_reassign_with_default)]
pub async fn set_hvm_param(&self, domid: u32, index: u32, value: u64) -> Result<()> {
trace!(
"set_hvm_param fd={} domid={} index={} value={:?}",
self.handle.as_raw_fd(),
domid,
index,
value,
);
let mut param = HvmParam::default();
param.domid = domid as u16;
param.index = index;
param.value = value;
self.hypercall2(HYPERVISOR_HVM_OP, 0, addr_of_mut!(param) as c_ulong)
.await?;
Ok(())
}
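A hedged usage sketch for set_hvm_param (not code from this commit): the hardcoded 0 passed to HYPERVISOR_HVM_OP corresponds to HVMOP_set_param in the public ABI, and a toolstack typically uses it to record the xenstore ring location for a new HVM/PVH domain. The HVM_PARAM_* indices below come from Xen's public hvm/params.h and are assumed rather than taken from this crate.

// Sketch only: record the xenstore ring frame and event channel for a domain.
const HVM_PARAM_STORE_PFN: u32 = 1;
const HVM_PARAM_STORE_EVTCHN: u32 = 2;

async fn wire_up_xenstore(call: &XenCall, domid: u32, pfn: u64, evtchn: u64) -> Result<()> {
    call.set_hvm_param(domid, HVM_PARAM_STORE_PFN, pfn).await?;
    call.set_hvm_param(domid, HVM_PARAM_STORE_EVTCHN, evtchn).await?;
    Ok(())
}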
pub async fn get_hvm_context(&self, domid: u32, buffer: Option<&mut [u8]>) -> Result<u32> {
trace!(
"domctl fd={} get_hvm_context domid={}",
self.handle.as_raw_fd(),
domid,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_GETHVMCONTEXT,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
hvm_context: HvmContext {
size: buffer.as_ref().map(|x| x.len()).unwrap_or(0) as u32,
buffer: buffer.map(|x| x.as_mut_ptr()).unwrap_or(null_mut()) as u64,
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { domctl.value.hvm_context.size })
}
pub async fn set_hvm_context(&self, domid: u32, buffer: &mut [u8]) -> Result<u32> {
trace!(
"domctl fd={} set_hvm_context domid={}",
self.handle.as_raw_fd(),
domid,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_SETHVMCONTEXT,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
hvm_context: HvmContext {
size: buffer.len() as u32,
buffer: buffer.as_ptr() as u64,
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { domctl.value.hvm_context.size })
}
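Hedged sketch of how get_hvm_context and set_hvm_context are meant to pair (mirroring the libxc pattern, not code from this commit): calling get_hvm_context with None makes Xen report only the required size, so the context can be sized, captured, and later pushed back.

// Sketch only: round-trip a domain's HVM context buffer.
async fn roundtrip_hvm_context(call: &XenCall, domid: u32) -> Result<()> {
    // With no buffer, Xen only reports the size it needs.
    let size = call.get_hvm_context(domid, None).await? as usize;
    let mut buffer = vec![0u8; size];
    // The second call copies the actual context records into the buffer.
    call.get_hvm_context(domid, Some(&mut buffer)).await?;
    // The buffer could be persisted or edited here before restoring it.
    call.set_hvm_context(domid, &mut buffer).await?;
    Ok(())
}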
pub async fn set_paging_mempool_size(&self, domid: u32, size: u64) -> Result<()> {
trace!(
"domctl fd={} set_paging_mempool_size domid={} size={}",
self.handle.as_raw_fd(),
domid,
size,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
paging_mempool: PagingMempool { size },
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
}
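One hedged note on set_paging_mempool_size above: in the domctl ABI the size is given in bytes, so callers usually scale a page count by the 4 KiB page size. A minimal sketch, assuming a XenCall handle is already open:

// Sketch only: give a freshly created domain a 1024-page (4 MiB) paging mempool.
async fn size_paging_pool(call: &XenCall, domid: u32) -> Result<()> {
    let pages: u64 = 1024;
    call.set_paging_mempool_size(domid, pages << 12).await
}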


@@ -35,8 +35,8 @@ pub struct MmapBatch {
pub num: u32,
pub domid: u16,
pub addr: u64,
pub mfns: *mut u64,
pub errors: *mut c_int,
pub mfns: u64,
pub errors: u64,
}
#[repr(C)]
@@ -200,6 +200,7 @@ pub const XEN_DOMCTL_PSR_CAT_OP: u32 = 78;
pub const XEN_DOMCTL_SOFT_RESET: u32 = 79;
pub const XEN_DOMCTL_SET_GNTTAB_LIMITS: u32 = 80;
pub const XEN_DOMCTL_VUART_OP: u32 = 81;
pub const XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE: u32 = 86;
pub const XEN_DOMCTL_GDBSX_GUESTMEMIO: u32 = 1000;
pub const XEN_DOMCTL_GDBSX_PAUSEVCPU: u32 = 1001;
pub const XEN_DOMCTL_GDBSX_UNPAUSEVCPU: u32 = 1002;
@@ -242,6 +243,8 @@ pub union DomCtlValue {
pub iomem_permission: IoMemPermission,
pub irq_permission: IrqPermission,
pub assign_device: AssignDevice,
pub hvm_context: HvmContext,
pub paging_mempool: PagingMempool,
pub pad: [u8; 128],
}
@@ -267,10 +270,7 @@ impl Default for CreateDomain {
CreateDomain {
ssidref: SECINITSID_DOMU,
handle: Uuid::new_v4().into_bytes(),
#[cfg(target_arch = "x86_64")]
flags: 0,
#[cfg(target_arch = "aarch64")]
flags: 1 << XEN_DOMCTL_CDF_HVM_GUEST,
iommu_opts: 0,
max_vcpus: 1,
max_evtchn_port: 1023,
@@ -346,6 +346,8 @@ pub struct ArchDomainConfig {
pub misc_flags: u32,
}
pub const X86_EMU_LAPIC: u32 = 1 << 0;
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "aarch64")]
@@ -398,6 +400,16 @@ pub struct MemoryReservation {
pub domid: u16,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct AddToPhysmap {
pub domid: u16,
pub size: u16,
pub space: u32,
pub idx: u64,
pub gpfn: u64,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct MultiCallEntry {
@@ -410,6 +422,7 @@ pub const XEN_MEM_POPULATE_PHYSMAP: u32 = 6;
pub const XEN_MEM_MEMORY_MAP: u32 = 10;
pub const XEN_MEM_SET_MEMORY_MAP: u32 = 13;
pub const XEN_MEM_CLAIM_PAGES: u32 = 24;
pub const XEN_MEM_ADD_TO_PHYSMAP: u32 = 7;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
@@ -439,8 +452,8 @@ impl Default for VcpuGuestContextFpuCtx {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "x86_64")]
pub struct CpuUserRegs {
#[allow(non_camel_case_types)]
pub struct x8664CpuUserRegs {
pub r15: u64,
pub r14: u64,
pub r13: u64,
@@ -479,7 +492,6 @@ pub struct CpuUserRegs {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "x86_64")]
pub struct TrapInfo {
pub vector: u8,
pub flags: u8,
@@ -489,11 +501,11 @@ pub struct TrapInfo {
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg(target_arch = "x86_64")]
pub struct VcpuGuestContext {
#[allow(non_camel_case_types)]
pub struct x8664VcpuGuestContext {
pub fpu_ctx: VcpuGuestContextFpuCtx,
pub flags: u64,
pub user_regs: CpuUserRegs,
pub user_regs: x8664CpuUserRegs,
pub trap_ctx: [TrapInfo; 256],
pub ldt_base: u64,
pub ldt_ents: u64,
@@ -512,10 +524,9 @@ pub struct VcpuGuestContext {
pub gs_base_user: u64,
}
#[cfg(target_arch = "x86_64")]
impl Default for VcpuGuestContext {
impl Default for x8664VcpuGuestContext {
fn default() -> Self {
VcpuGuestContext {
Self {
fpu_ctx: Default::default(),
flags: 0,
user_regs: Default::default(),
@@ -541,8 +552,7 @@ impl Default for VcpuGuestContext {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "aarch64")]
pub struct CpuUserRegs {
pub struct Arm64CpuUserRegs {
pub x0: u64,
pub x1: u64,
pub x2: u64,
@@ -588,10 +598,9 @@ pub struct CpuUserRegs {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "aarch64")]
pub struct VcpuGuestContext {
pub struct Arm64VcpuGuestContext {
pub flags: u32,
pub user_regs: CpuUserRegs,
pub user_regs: x8664CpuUserRegs,
pub sctlr: u64,
pub ttbcr: u64,
pub ttbr0: u64,
@@ -599,7 +608,10 @@ pub struct VcpuGuestContext {
}
pub union VcpuGuestContextAny {
pub value: VcpuGuestContext,
#[cfg(target_arch = "aarch64")]
pub value: Arm64VcpuGuestContext,
#[cfg(target_arch = "x86_64")]
pub value: x8664VcpuGuestContext,
}
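Since VcpuGuestContextAny now selects its value variant by target architecture and set_vcpu_context (earlier in this diff) takes the union directly, a caller builds the architecture-specific context and wraps it. A hedged x86_64-only sketch follows; the rip/rsp field names mirror Xen's public cpu_user_regs layout and are assumed here, since that part of the struct is not shown in this hunk.

// Sketch only: point vCPU 0 at a kernel entry point. Real boot code would also
// set flags (e.g. VGCF_online), segment selectors, and page-table registers.
#[cfg(target_arch = "x86_64")]
async fn boot_vcpu0(call: &XenCall, domid: u32, entry_rip: u64, boot_rsp: u64) -> Result<()> {
    let mut ctx = x8664VcpuGuestContext::default();
    ctx.user_regs.rip = entry_rip; // assumed field name, mirroring cpu_user_regs
    ctx.user_regs.rsp = boot_rsp; // assumed field name, mirroring cpu_user_regs
    call.set_vcpu_context(domid, 0, VcpuGuestContextAny { value: ctx }).await
}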
#[repr(C)]
@@ -628,17 +640,11 @@ pub struct E820Entry {
pub typ: u32,
}
#[cfg(target_arch = "x86_64")]
pub const E820_MAX: u32 = 1024;
#[cfg(target_arch = "x86_64")]
pub const E820_RAM: u32 = 1;
#[cfg(target_arch = "x86_64")]
pub const E820_RESERVED: u32 = 2;
#[cfg(target_arch = "x86_64")]
pub const E820_ACPI: u32 = 3;
#[cfg(target_arch = "x86_64")]
pub const E820_NVS: u32 = 4;
#[cfg(target_arch = "x86_64")]
pub const E820_UNUSABLE: u32 = 5;
pub const PHYSDEVOP_MAP_PIRQ: u64 = 13;
@@ -676,3 +682,34 @@ pub struct AssignDevice {
}
pub const DOMID_IO: u32 = 0x7FF1;
pub const MEMFLAGS_POPULATE_ON_DEMAND: u32 = 1 << 16;
pub struct PodTarget {
pub target_pages: u64,
pub total_pages: u64,
pub pod_cache_pages: u64,
pub pod_entries: u64,
pub domid: u16,
}
#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
pub struct HvmParam {
pub domid: u16,
pub pad: u8,
pub index: u32,
pub value: u64,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct HvmContext {
pub size: u32,
pub buffer: u64,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct PagingMempool {
pub size: u64,
}