feat: rework to support multiple platforms

Alex Zenla 2024-05-05 07:27:35 -07:00
parent e8e3982b4e
commit 007f849a59
No known key found for this signature in database
GPG Key ID: 067B238899B51269
11 changed files with 369 additions and 180 deletions

View File

@@ -5,7 +5,7 @@ use ipnetwork::IpNetwork;
use loopdev::LoopControl;
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::XenClient;
use xenclient::{x86pv::X86PvPlatform, XenClient};
use xenstore::{XsdClient, XsdInterface};
use self::{
@@ -46,12 +46,12 @@ pub struct GuestInfo {
#[derive(Clone)]
pub struct RuntimeContext {
pub autoloop: AutoLoop,
pub xen: XenClient,
pub xen: XenClient<X86PvPlatform>,
}
impl RuntimeContext {
pub async fn new() -> Result<Self> {
let xen = XenClient::open(0).await?;
let xen = XenClient::new(0, X86PvPlatform::new()).await?;
Ok(RuntimeContext {
autoloop: AutoLoop::new(LoopControl::open()?),
xen,
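As a usage sketch of the reworked constructor, assuming only the API shape visible in this diff (`XenClient::new(domid, platform)` returning a platform-generic client):

```rust
use xenclient::{x86pv::X86PvPlatform, XenClient};

// Domain 0 is the control domain; the platform parameter now selects the
// boot strategy, so the same runtime code can target PV or PVH guests.
async fn open_runtime_client() -> xenclient::error::Result<XenClient<X86PvPlatform>> {
    XenClient::new(0, X86PvPlatform::new()).await
}
```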

View File

@@ -33,7 +33,3 @@ path = "examples/domain_create.rs"
[[example]]
name = "xencall-version-capabilities"
path = "examples/version_capabilities.rs"
[[example]]
name = "xencall-vcpu-context"
path = "examples/vcpu_context.rs"

View File

@@ -3,17 +3,33 @@ pub mod sys;
use crate::error::{Error, Result};
use crate::sys::{
AddressSize, AssignDevice, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext, EvtChnAllocUnbound, GetDomainInfo, GetPageFrameInfo3, HvmContext, HvmParam, Hypercall, HypercallInit, IoMemPermission, IoPortPermission, IrqPermission, MaxMem, MaxVcpus, MemoryMap, MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, PciAssignDevice, XenCapabilitiesInfo, DOMCTL_DEV_PCI, HYPERVISOR_DOMCTL, HYPERVISOR_EVENT_CHANNEL_OP, HYPERVISOR_HVM_OP, HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP, HYPERVISOR_MULTICALL, HYPERVISOR_XEN_VERSION, XENVER_CAPABILITIES, XEN_DOMCTL_ASSIGN_DEVICE, XEN_DOMCTL_CREATEDOMAIN, XEN_DOMCTL_DESTROYDOMAIN, XEN_DOMCTL_GETDOMAININFO, XEN_DOMCTL_GETHVMCONTEXT, XEN_DOMCTL_GETPAGEFRAMEINFO3, XEN_DOMCTL_HYPERCALL_INIT, XEN_DOMCTL_IOMEM_PERMISSION, XEN_DOMCTL_IOPORT_PERMISSION, XEN_DOMCTL_IRQ_PERMISSION, XEN_DOMCTL_MAX_MEM, XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN, XEN_DOMCTL_SETHVMCONTEXT, XEN_DOMCTL_SETVCPUCONTEXT, XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_UNPAUSEDOMAIN, XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP, XEN_MEM_POPULATE_PHYSMAP
AddressSize, AssignDevice, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext,
EvtChnAllocUnbound, GetDomainInfo, GetPageFrameInfo3, HvmContext, HvmParam, Hypercall,
HypercallInit, IoMemPermission, IoPortPermission, IrqPermission, MaxMem, MaxVcpus, MemoryMap,
MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, PagingMempool,
PciAssignDevice, XenCapabilitiesInfo, DOMCTL_DEV_PCI, HYPERVISOR_DOMCTL,
HYPERVISOR_EVENT_CHANNEL_OP, HYPERVISOR_HVM_OP, HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP,
HYPERVISOR_MULTICALL, HYPERVISOR_XEN_VERSION, XENVER_CAPABILITIES, XEN_DOMCTL_ASSIGN_DEVICE,
XEN_DOMCTL_CREATEDOMAIN, XEN_DOMCTL_DESTROYDOMAIN, XEN_DOMCTL_GETDOMAININFO,
XEN_DOMCTL_GETHVMCONTEXT, XEN_DOMCTL_GETPAGEFRAMEINFO3, XEN_DOMCTL_HYPERCALL_INIT,
XEN_DOMCTL_IOMEM_PERMISSION, XEN_DOMCTL_IOPORT_PERMISSION, XEN_DOMCTL_IRQ_PERMISSION,
XEN_DOMCTL_MAX_MEM, XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN, XEN_DOMCTL_SETHVMCONTEXT,
XEN_DOMCTL_SETVCPUCONTEXT, XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
XEN_DOMCTL_UNPAUSEDOMAIN, XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP, XEN_MEM_POPULATE_PHYSMAP,
};
use libc::{c_int, mmap, usleep, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use libc::{c_int, mmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use log::trace;
use nix::errno::Errno;
use std::ffi::{c_long, c_uint, c_ulong, c_void};
use std::sync::Arc;
use std::time::Duration;
use sys::{
E820Entry, ForeignMemoryMap, PhysdevMapPirq, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP, PHYSDEVOP_MAP_PIRQ, XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION, XEN_MEM_SET_MEMORY_MAP
E820Entry, ForeignMemoryMap, PhysdevMapPirq, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP,
PHYSDEVOP_MAP_PIRQ, XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION,
XEN_MEM_SET_MEMORY_MAP,
};
use tokio::sync::Semaphore;
use tokio::time::sleep;
use std::fs::{File, OpenOptions};
use std::os::fd::AsRawFd;
@@ -220,8 +236,8 @@ impl XenCall {
num: num as u32,
domid: domid as u16,
addr,
mfns: mfns.as_mut_ptr(),
errors: errors.as_mut_ptr(),
mfns: mfns.as_mut_ptr() as u64,
errors: errors.as_mut_ptr() as u64,
};
let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
@@ -230,7 +246,7 @@ impl XenCall {
return Err(Error::MmapBatchFailed(errno))?;
}
usleep(100);
sleep(Duration::from_micros(100)).await;
let mut i: usize = 0;
let mut paged: usize = 0;
@@ -245,8 +261,8 @@ impl XenCall {
num: 1,
domid: domid as u16,
addr: addr + ((i as u64) << 12),
mfns: mfns.as_mut_ptr().add(i),
errors: errors.as_mut_ptr().add(i),
mfns: mfns.as_mut_ptr().add(i) as u64,
errors: errors.as_mut_ptr().add(i) as u64,
};
loop {
@@ -558,6 +574,12 @@ impl XenCall {
domid: u32,
entries: Vec<E820Entry>,
) -> Result<Vec<E820Entry>> {
trace!(
"fd={} set_memory_map domid={} entries={:?}",
self.handle.as_raw_fd(),
domid,
entries
);
let mut memory_map = ForeignMemoryMap {
domid: domid as u16,
map: MemoryMap {
@@ -594,24 +616,14 @@ impl XenCall {
domid: domid as u16,
};
let calls = &mut [MultiCallEntry {
op: HYPERVISOR_MEMORY_OP,
result: 0,
args: [
let code = self
.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_POPULATE_PHYSMAP as c_ulong,
addr_of_mut!(reservation) as c_ulong,
0,
0,
0,
0,
],
}];
self.multicall(calls).await?;
let code = calls[0].result;
if code > !0xfff {
return Err(Error::PopulatePhysmapFailed);
}
if code as usize > extent_starts.len() {
)
.await?;
if code as usize != extent_starts.len() {
return Err(Error::PopulatePhysmapFailed);
}
let extents = extent_starts[0..code as usize].to_vec();
@@ -805,12 +817,8 @@ impl XenCall {
param.domid = domid as u16;
param.index = index;
param.value = value;
self.hypercall2(
HYPERVISOR_HVM_OP,
0,
addr_of_mut!(param) as c_ulong,
)
.await?;
self.hypercall2(HYPERVISOR_HVM_OP, 0, addr_of_mut!(param) as c_ulong)
.await?;
Ok(())
}
@@ -828,7 +836,7 @@ impl XenCall {
hvm_context: HvmContext {
size: buffer.as_ref().map(|x| x.len()).unwrap_or(0) as u32,
buffer: buffer.map(|x| x.as_mut_ptr()).unwrap_or(null_mut()) as u64,
}
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
@@ -850,11 +858,31 @@ impl XenCall {
hvm_context: HvmContext {
size: buffer.len() as u32,
buffer: buffer.as_ptr() as u64,
}
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { domctl.value.hvm_context.size })
}
pub async fn set_paging_mempool_size(&self, domid: u32, size: u64) -> Result<()> {
trace!(
"domctl fd={} set_paging_mempool_size domid={} size={}",
self.handle.as_raw_fd(),
domid,
size,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
paging_mempool: PagingMempool { size },
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
}
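`XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE` sizes the hypervisor-side pool backing a domain's paging structures; the PVH boot path later in this commit passes `1024 << 12` (1024 pages, 4 MiB). A hedged usage sketch of the new method (the helper name is hypothetical):

```rust
// Give a fresh domain a 4 MiB paging mempool (1024 pages of 4 KiB each),
// mirroring the value the PVH platform below uses. `call` is assumed to be
// an open xencall::XenCall handle.
async fn size_paging_pool(call: &xencall::XenCall, domid: u32) -> xencall::error::Result<()> {
    call.set_paging_mempool_size(domid, 1024 << 12).await
}
```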

View File

@@ -35,8 +35,8 @@ pub struct MmapBatch {
pub num: u32,
pub domid: u16,
pub addr: u64,
pub mfns: *mut u64,
pub errors: *mut c_int,
pub mfns: u64,
pub errors: u64,
}
#[repr(C)]
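A sketch of the call-site shape after this field change: buffer addresses are cast to `u64`, so `MmapBatch` holds no raw pointers. One plausible motivation, an assumption rather than anything the commit states, is that the surrounding batch code now awaits between retries, and pointer-free fields keep the generated future `Send`. The helper below is hypothetical:

```rust
// Build a batch from caller-owned buffers; the addresses are captured as
// plain u64 values, exactly as the updated mmap_batch call sites do above.
fn build_batch(domid: u32, addr: u64, mfns: &mut [u64], errors: &mut [i32]) -> MmapBatch {
    MmapBatch {
        num: mfns.len() as u32,
        domid: domid as u16,
        addr,
        mfns: mfns.as_mut_ptr() as u64,
        errors: errors.as_mut_ptr() as u64,
    }
}
```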
@@ -200,6 +200,7 @@ pub const XEN_DOMCTL_PSR_CAT_OP: u32 = 78;
pub const XEN_DOMCTL_SOFT_RESET: u32 = 79;
pub const XEN_DOMCTL_SET_GNTTAB_LIMITS: u32 = 80;
pub const XEN_DOMCTL_VUART_OP: u32 = 81;
pub const XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE: u32 = 86;
pub const XEN_DOMCTL_GDBSX_GUESTMEMIO: u32 = 1000;
pub const XEN_DOMCTL_GDBSX_PAUSEVCPU: u32 = 1001;
pub const XEN_DOMCTL_GDBSX_UNPAUSEVCPU: u32 = 1002;
@@ -243,6 +244,7 @@ pub union DomCtlValue {
pub irq_permission: IrqPermission,
pub assign_device: AssignDevice,
pub hvm_context: HvmContext,
pub paging_mempool: PagingMempool,
pub pad: [u8; 128],
}
@@ -270,8 +272,6 @@ impl Default for CreateDomain {
handle: Uuid::new_v4().into_bytes(),
#[cfg(target_arch = "x86_64")]
flags: 0,
#[cfg(target_arch = "aarch64")]
flags: 1 << XEN_DOMCTL_CDF_HVM_GUEST,
iommu_opts: 0,
max_vcpus: 1,
max_evtchn_port: 1023,
@@ -698,3 +698,9 @@ pub struct HvmContext {
pub size: u32,
pub buffer: u64,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct PagingMempool {
pub size: u64,
}

View File

@@ -1,6 +1,7 @@
use std::{env, process};
use tokio::fs;
use xenclient::error::Result;
use xenclient::x86pvh::X86PvhPlatform;
use xenclient::{DomainConfig, XenClient};
#[tokio::main]
@@ -14,7 +15,7 @@ async fn main() -> Result<()> {
}
let kernel_image_path = args.get(1).expect("argument not specified");
let initrd_path = args.get(2).expect("argument not specified");
let client = XenClient::open(0).await?;
let client = XenClient::new(0, X86PvhPlatform::new()).await?;
let config = DomainConfig {
backend_domid: 0,
name: "xenclient-test".to_string(),

View File

@@ -2,7 +2,7 @@ use std::slice;
use log::debug;
use slice_copy::copy;
use xencall::XenCall;
use xencall::{sys::CreateDomain, XenCall};
use crate::{
error::{Error, Result},
@@ -18,7 +18,7 @@ pub struct BootSetup<I: BootImageLoader, P: BootSetupPlatform> {
pub dtb: Option<Vec<u8>>,
}
#[derive(Debug, Default)]
#[derive(Debug, Default, Clone)]
pub struct DomainSegment {
pub vstart: u64,
pub vend: u64,
@@ -40,7 +40,7 @@ pub struct BootDomain {
pub image_info: BootImageInfo,
pub phys: PhysicalPages,
pub store_evtchn: u32,
pub xenstore_mfn: u64,
pub store_mfn: u64,
pub initrd_segment: DomainSegment,
pub consoles: Vec<(u32, u64)>,
}
@@ -174,9 +174,11 @@ impl<I: BootImageLoader, P: BootSetupPlatform> BootSetup<I, P> {
phys: PhysicalPages::new(self.call.clone(), self.domid, self.platform.page_shift()),
initrd_segment: DomainSegment::default(),
store_evtchn: 0,
xenstore_mfn: 0,
store_mfn: 0,
};
self.platform.initialize_early(&mut domain).await?;
let mut initrd_segment = if !domain.image_info.unmapped_initrd {
Some(domain.alloc_module(initrd).await?)
} else {
@@ -248,12 +250,15 @@
}
#[async_trait::async_trait]
pub trait BootSetupPlatform {
pub trait BootSetupPlatform: Clone {
fn create_domain(&self) -> CreateDomain;
fn page_size(&self) -> u64;
fn page_shift(&self) -> u64;
fn needs_early_kernel(&self) -> bool;
async fn initialize_memory(&self, domain: &mut BootDomain) -> Result<()>;
async fn initialize_early(&mut self, domain: &mut BootDomain) -> Result<()>;
async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()>;
async fn alloc_page_tables(&mut self, domain: &mut BootDomain)
-> Result<Option<DomainSegment>>;
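The trait now takes `&mut self` in the memory hooks and gains `initialize_early`, which runs before any allocation so a platform can fix its memory layout and HVM parameters up front (the PVH implementation below does exactly that). An illustrative driver for the two-phase flow; the helper itself is hypothetical:

```rust
// initialize_early lets the platform decide layout first; initialize_memory
// then populates the physmap. This mirrors the calls BootSetup::initialize
// makes in this diff.
async fn run_platform_setup<P: BootSetupPlatform>(
    platform: &mut P,
    domain: &mut BootDomain,
) -> Result<()> {
    platform.initialize_early(domain).await?;
    platform.initialize_memory(domain).await?;
    Ok(())
}
```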

View File

@@ -7,7 +7,7 @@ pub mod sys;
use crate::boot::{BootDomain, BootSetup};
use crate::elfloader::ElfImageLoader;
use crate::error::{Error, Result};
use crate::x86pvh::X86PvhPlatform;
use boot::BootSetupPlatform;
use log::{debug, trace, warn};
use pci::{PciBdf, XenPciBackend};
use sys::XEN_PAGE_SHIFT;
@@ -16,12 +16,10 @@ use tokio::time::timeout;
use std::collections::HashMap;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use uuid::Uuid;
use xencall::sys::{
CreateDomain, DOMCTL_DEV_RDM_RELAXED, XEN_DOMCTL_CDF_HAP, XEN_DOMCTL_CDF_HVM_GUEST,
XEN_DOMCTL_CDF_IOMMU, XEN_X86_EMU_LAPIC,
};
use xencall::sys::{CreateDomain, DOMCTL_DEV_RDM_RELAXED};
use xencall::XenCall;
use xenstore::{
XsPermission, XsdClient, XsdInterface, XS_PERM_NONE, XS_PERM_READ, XS_PERM_READ_WRITE,
@@ -32,9 +30,10 @@ pub mod x86pv;
pub mod x86pvh;
#[derive(Clone)]
pub struct XenClient {
pub struct XenClient<P: BootSetupPlatform> {
pub store: XsdClient,
call: XenCall,
platform: Arc<P>,
}
#[derive(Clone, Debug)]
@@ -135,27 +134,20 @@ pub struct CreatedDomain {
pub channels: Vec<CreatedChannel>,
}
impl XenClient {
pub async fn open(current_domid: u32) -> Result<XenClient> {
impl<P: BootSetupPlatform> XenClient<P> {
pub async fn new(current_domid: u32, platform: P) -> Result<XenClient<P>> {
let store = XsdClient::open().await?;
let call = XenCall::open(current_domid)?;
Ok(XenClient { store, call })
Ok(XenClient {
store,
call,
platform: Arc::new(platform),
})
}
pub async fn create(&self, config: &DomainConfig) -> Result<CreatedDomain> {
let mut domain = CreateDomain {
max_vcpus: config.max_vcpus,
..Default::default()
};
let mut domain = self.platform.create_domain();
domain.max_vcpus = config.max_vcpus;
if cfg!(target_arch = "aarch64") {
domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP;
} else {
domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP | XEN_DOMCTL_CDF_IOMMU;
domain.arch_domain_config.emulation_flags = XEN_X86_EMU_LAPIC;
}
let domid = self.call.create_domain(domain).await?;
match self.init(domid, &domain, config).await {
Ok(created) => Ok(created),
@@ -171,18 +163,18 @@ impl XenClient {
async fn init(
&self,
domid: u32,
domain: &CreateDomain,
created: &CreateDomain,
config: &DomainConfig,
) -> Result<CreatedDomain> {
trace!(
"XenClient init domid={} domain={:?} config={:?}",
domid,
domain,
created,
config
);
let backend_dom_path = self.store.get_domain_path(0).await?;
let dom_path = self.store.get_domain_path(domid).await?;
let uuid_string = Uuid::from_bytes(domain.handle).to_string();
let uuid_string = Uuid::from_bytes(created.handle).to_string();
let vm_path = format!("/vm/{}", uuid_string);
let ro_perm = &[
@@ -263,7 +255,7 @@ impl XenClient {
tx.write_string(
format!("{}/uuid", vm_path).as_str(),
&Uuid::from_bytes(domain.handle).to_string(),
&Uuid::from_bytes(created.handle).to_string(),
)
.await?;
tx.write_string(format!("{}/name", dom_path).as_str(), &config.name)
@@ -285,19 +277,16 @@ impl XenClient {
}
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
let xenstore_evtchn: u32;
let xenstore_mfn: u64;
self.call
.set_max_mem(domid, (config.mem_mb * 1024) + 1024)
.await?;
let mut domain: BootDomain;
{
let loader = ElfImageLoader::load_file_kernel(&config.kernel)?;
let mut boot =
BootSetup::new(self.call.clone(), domid, X86PvhPlatform::new(), loader, None);
let platform = (*self.platform).clone();
let mut boot = BootSetup::new(self.call.clone(), domid, platform, loader, None);
domain = boot.initialize(&config.initrd, config.mem_mb).await?;
boot.boot(&mut domain, &config.cmdline).await?;
xenstore_evtchn = domain.store_evtchn;
xenstore_mfn = domain.xenstore_mfn;
}
{
@@ -325,15 +314,15 @@ impl XenClient {
tx.write_string(format!("{}/domid", dom_path).as_str(), &domid.to_string())
.await?;
tx.write_string(format!("{}/type", dom_path).as_str(), "PVH")
.await?;
tx.write_string(
format!("{}/store/port", dom_path).as_str(),
&xenstore_evtchn.to_string(),
&domain.store_evtchn.to_string(),
)
.await?;
tx.write_string(
format!("{}/store/ring-ref", dom_path).as_str(),
&xenstore_mfn.to_string(),
&domain.store_mfn.to_string(),
)
.await?;
for i in 0..config.max_vcpus {
@@ -348,7 +337,7 @@ impl XenClient {
}
if !self
.store
.introduce_domain(domid, xenstore_mfn, xenstore_evtchn)
.introduce_domain(domid, domain.store_mfn, domain.store_evtchn)
.await?
{
return Err(Error::IntroduceDomainFailed);

View File

@@ -1,7 +1,7 @@
use crate::error::Result;
use crate::sys::{XEN_PAGE_SHIFT, XEN_PAGE_SIZE};
use crate::sys::XEN_PAGE_SHIFT;
use crate::Error;
use libc::{memset, munmap};
use libc::munmap;
use log::debug;
use nix::errno::Errno;
use std::ffi::c_void;
@@ -124,11 +124,20 @@ impl PhysicalPages {
Ok(addr)
}
pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<PhysicalPage> {
let num = (size >> XEN_PAGE_SHIFT) as usize;
pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
let count = (size >> XEN_PAGE_SHIFT) as usize;
let mut entries = vec![MmapEntry::default(); count];
for (i, entry) in entries.iter_mut().enumerate() {
entry.mfn = mfn + i as u64;
}
let chunk_size = 1 << XEN_PAGE_SHIFT;
let num_per_entry = chunk_size >> XEN_PAGE_SHIFT;
let num = num_per_entry * count;
let mut pfns = vec![u64::MAX; num];
for (i, item) in pfns.iter_mut().enumerate().take(num) {
*item = mfn + i as u64;
for i in 0..count {
for j in 0..num_per_entry {
pfns[i * num_per_entry + j] = entries[i].mfn + j as u64;
}
}
let actual_mmap_len = (num as u64) << XEN_PAGE_SHIFT;
@@ -148,24 +157,21 @@ impl PhysicalPages {
let page = PhysicalPage {
pfn: mfn,
ptr: addr,
count: num as u64,
count: count as u64,
};
debug!(
"alloc_mfn {:#x}+{:#x} at {:#x}",
page.pfn, page.count, page.ptr
);
self.pages.push(page.clone());
Ok(page)
self.pages.push(page);
Ok(addr)
}
pub async fn clear_pages(&mut self, pfn: u64, count: u64) -> Result<()> {
let mfn = if !self.p2m.is_empty() {
self.p2m[pfn as usize]
} else {
pfn
let ptr = self.pfn_to_ptr(pfn, count).await?;
let slice = unsafe {
slice::from_raw_parts_mut(ptr as *mut u8, (count * (1 << self.page_shift)) as usize)
};
let page = self.map_foreign_pages(mfn, count << XEN_PAGE_SHIFT).await?;
let slice = unsafe { slice::from_raw_parts_mut(page.ptr as *mut u8, (count << XEN_PAGE_SHIFT) as usize) };
slice.fill(0);
Ok(())
}
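`map_foreign_pages` now returns the mapped virtual address as a `u64` rather than a `PhysicalPage`, and `clear_pages` goes through `pfn_to_ptr` instead of remapping. The shared-info path later in this commit shows the intended call shape; this excerpt is a sketch lifted from that code:

```rust
// The returned address is cast straight to the target structure instead of
// being unwrapped from a PhysicalPage.
let info = domain
    .phys
    .map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)
    .await? as *mut SharedInfo;
```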

View File

@@ -142,3 +142,5 @@ pub const HVM_PARAM_CONSOLE_EVTCHN: u32 = 8;
pub const HVM_PARAM_PAGING_RING_PFN: u32 = 27;
pub const HVM_PARAM_MONITOR_RING_PFN: u32 = 28;
pub const HVM_PARAM_SHARING_RING_PFN: u32 = 29;
pub const HVM_PARAM_TIMER_MODE: u32 = 10;
pub const HVM_PARAM_ALTP2M: u32 = 35;

View File

@@ -9,7 +9,8 @@ use log::{debug, trace};
use nix::errno::Errno;
use slice_copy::copy;
use xencall::sys::{
x8664VcpuGuestContext, E820Entry, VcpuGuestContextAny, E820_MAX, E820_RAM, E820_UNUSABLE, MMUEXT_PIN_L4_TABLE
x8664VcpuGuestContext, CreateDomain, E820Entry, VcpuGuestContextAny, E820_MAX, E820_RAM,
E820_UNUSABLE, MMUEXT_PIN_L4_TABLE, XEN_DOMCTL_CDF_IOMMU,
};
use crate::{
@@ -141,7 +142,7 @@ struct VmemRange {
_nid: u32,
}
#[derive(Default)]
#[derive(Default, Clone)]
pub struct X86PvPlatform {
table: PageTable,
p2m_segment: Option<DomainSegment>,
@@ -433,6 +434,13 @@ impl X86PvPlatform {
#[async_trait::async_trait]
impl BootSetupPlatform for X86PvPlatform {
fn create_domain(&self) -> CreateDomain {
CreateDomain {
flags: XEN_DOMCTL_CDF_IOMMU,
..Default::default()
}
}
fn page_size(&self) -> u64 {
X86_PAGE_SIZE
}
@@ -445,7 +453,11 @@ impl BootSetupPlatform for X86PvPlatform {
false
}
async fn initialize_memory(&self, domain: &mut BootDomain) -> Result<()> {
async fn initialize_early(&mut self, _: &mut BootDomain) -> Result<()> {
Ok(())
}
async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()> {
domain.call.set_address_size(domain.domid, 64).await?;
domain
.call
@@ -695,7 +707,7 @@ impl BootSetupPlatform for X86PvPlatform {
}
self.start_info_segment = Some(domain.alloc_page()?);
self.xenstore_segment = Some(domain.alloc_page()?);
domain.xenstore_mfn = domain.phys.p2m[self.xenstore_segment.as_ref().unwrap().pfn as usize];
domain.store_mfn = domain.phys.p2m[self.xenstore_segment.as_ref().unwrap().pfn as usize];
let evtchn = domain.call.evtchn_alloc_unbound(domain.domid, 0).await?;
let page = domain.alloc_page()?;
domain
@@ -726,7 +738,7 @@ impl BootSetupPlatform for X86PvPlatform {
let info = domain
.phys
.map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)
.await?.ptr as *mut SharedInfo;
.await? as *mut SharedInfo;
unsafe {
let size = size_of::<SharedInfo>();
let info_as_buff = slice::from_raw_parts_mut(info as *mut u8, size);
@@ -861,9 +873,10 @@ impl BootSetupPlatform for X86PvPlatform {
vcpu.kernel_ss = vcpu.user_regs.ss as u64;
vcpu.kernel_sp = vcpu.user_regs.rsp;
trace!("vcpu context: {:?}", vcpu);
domain.call.set_vcpu_context(domain.domid, 0, VcpuGuestContextAny {
value: vcpu,
}).await?;
domain
.call
.set_vcpu_context(domain.domid, 0, VcpuGuestContextAny { value: vcpu })
.await?;
Ok(())
}

View File

@@ -1,19 +1,25 @@
use std::{
mem::{size_of, MaybeUninit}, os::raw::{c_char, c_void}, ptr::addr_of_mut, slice
mem::{size_of, MaybeUninit},
os::raw::{c_char, c_void},
ptr::addr_of_mut,
slice,
};
use libc::munmap;
use log::trace;
use nix::errno::Errno;
use xencall::sys::{
x8664VcpuGuestContext, E820Entry, E820_RAM, MEMFLAGS_POPULATE_ON_DEMAND
ArchDomainConfig, CreateDomain, E820Entry, E820_RAM, E820_RESERVED,
MEMFLAGS_POPULATE_ON_DEMAND, XEN_DOMCTL_CDF_HAP, XEN_DOMCTL_CDF_HVM_GUEST,
XEN_DOMCTL_CDF_IOMMU, XEN_X86_EMU_LAPIC,
};
use crate::{
boot::{BootDomain, BootSetupPlatform, DomainSegment},
error::{Error, Result},
sys::{
GrantEntry, HVM_PARAM_BUFIOREQ_PFN, HVM_PARAM_CONSOLE_PFN, HVM_PARAM_IOREQ_PFN, HVM_PARAM_MONITOR_RING_PFN, HVM_PARAM_PAGING_RING_PFN, HVM_PARAM_SHARING_RING_PFN, HVM_PARAM_STORE_PFN, SUPERPAGE_1GB_NR_PFNS, SUPERPAGE_1GB_SHIFT, SUPERPAGE_2MB_NR_PFNS, SUPERPAGE_2MB_SHIFT, SUPERPAGE_BATCH_SIZE, VGCF_IN_KERNEL, VGCF_ONLINE, XEN_PAGE_SHIFT
GrantEntry, HVM_PARAM_ALTP2M, HVM_PARAM_BUFIOREQ_PFN, HVM_PARAM_CONSOLE_PFN,
HVM_PARAM_IOREQ_PFN, HVM_PARAM_MONITOR_RING_PFN, HVM_PARAM_PAGING_RING_PFN,
HVM_PARAM_SHARING_RING_PFN, HVM_PARAM_STORE_PFN, HVM_PARAM_TIMER_MODE, XEN_PAGE_SHIFT,
},
};
@@ -240,13 +246,16 @@ struct VmemRange {
_nid: u32,
}
#[derive(Default)]
#[derive(Default, Clone)]
pub struct X86PvhPlatform {
start_info_segment: Option<DomainSegment>,
lowmem_end: u64,
highmem_end: u64,
mmio_start: u64,
}
const X86_CR0_PE: u64 = 0x01;
const X86_CR0_ET: u64 = 0x10;
impl X86PvhPlatform {
pub fn new() -> Self {
@@ -255,19 +264,35 @@ impl X86PvhPlatform {
}
}
pub fn construct_memmap(&self, mem_size_bytes: u64) -> Result<Vec<E820Entry>> {
let entries = vec![
E820Entry {
addr: 0,
size: mem_size_bytes,
typ: E820_RAM
},
E820Entry {
addr: (X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES) << XEN_PAGE_SHIFT,
size: X86_HVM_NR_SPECIAL_PAGES << XEN_PAGE_SHIFT,
typ: E820_RAM
},
];
pub fn construct_memmap(&self) -> Result<Vec<E820Entry>> {
let mut entries = Vec::new();
let highmem_size = if self.highmem_end > 0 {
self.highmem_end - (1u64 << 32)
} else {
0
};
let lowmem_start = 0u64;
entries.push(E820Entry {
addr: lowmem_start,
size: self.lowmem_end - lowmem_start,
typ: E820_RAM,
});
entries.push(E820Entry {
addr: (X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES) << XEN_PAGE_SHIFT,
size: X86_HVM_NR_SPECIAL_PAGES << XEN_PAGE_SHIFT,
typ: E820_RESERVED,
});
if highmem_size > 0 {
entries.push(E820Entry {
addr: 1u64 << 32,
size: highmem_size,
typ: E820_RAM,
});
}
Ok(entries)
}
@@ -280,6 +305,17 @@ impl X86PvhPlatform {
#[async_trait::async_trait]
impl BootSetupPlatform for X86PvhPlatform {
fn create_domain(&self) -> CreateDomain {
CreateDomain {
flags: XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP | XEN_DOMCTL_CDF_IOMMU,
arch_domain_config: ArchDomainConfig {
emulation_flags: XEN_X86_EMU_LAPIC,
..Default::default()
},
..Default::default()
}
}
fn page_size(&self) -> u64 {
X86_PAGE_SIZE
}
@@ -292,7 +328,49 @@ impl BootSetupPlatform for X86PvhPlatform {
false
}
async fn initialize_memory(&self, domain: &mut BootDomain) -> Result<()> {
async fn initialize_early(&mut self, domain: &mut BootDomain) -> Result<()> {
let mut memory_start =
(X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES) << self.page_shift();
memory_start = memory_start.min(LAPIC_BASE_ADDRESS);
memory_start = memory_start.min(ACPI_INFO_PHYSICAL_ADDRESS);
let mmio_size = (4 * 1024 * 1024 * 1024) - memory_start;
let mut lowmem_end = domain.total_pages << self.page_shift();
let mut highmem_end = 0u64;
let mmio_start = (1u64 << 32) - mmio_size;
if lowmem_end > mmio_start {
highmem_end = (1 << 32) + (lowmem_end - mmio_start);
lowmem_end = mmio_start;
}
self.lowmem_end = lowmem_end;
self.highmem_end = highmem_end;
self.mmio_start = mmio_start;
domain
.call
.set_hvm_param(domain.domid, HVM_PARAM_TIMER_MODE, 1)
.await?;
domain
.call
.set_hvm_param(domain.domid, HVM_PARAM_ALTP2M, 0)
.await?;
domain
.call
.set_paging_mempool_size(domain.domid, 1024 << 12)
.await?;
let memmap = self.construct_memmap()?;
domain
.call
.set_memory_map(domain.domid, memmap.clone())
.await?;
Ok(())
}
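For reference, a standalone restatement of the split `initialize_early` computes above (the helper name is hypothetical; the arithmetic is lifted directly from the code):

```rust
// Returns (lowmem_end, highmem_end) in bytes. Guest RAM that would overlap
// the MMIO hole [mmio_start, 4 GiB) is relocated above the 4 GiB boundary.
fn split_guest_memory(total_bytes: u64, mmio_start: u64) -> (u64, u64) {
    let mut lowmem_end = total_bytes;
    let mut highmem_end = 0u64;
    if lowmem_end > mmio_start {
        highmem_end = (1u64 << 32) + (lowmem_end - mmio_start);
        lowmem_end = mmio_start;
    }
    (lowmem_end, highmem_end)
}
```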
async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()> {
domain
.call
.claim_pages(domain.domid, domain.total_pages)
.await?;
let memflags = if domain.target_pages > domain.total_pages {
MEMFLAGS_POPULATE_ON_DEMAND
} else {
@@ -300,13 +378,21 @@ impl BootSetupPlatform for X86PvhPlatform {
};
let mut vmemranges: Vec<VmemRange> = Vec::new();
let stub = VmemRange {
vmemranges.push(VmemRange {
start: 0,
end: domain.total_pages << self.page_shift(),
end: self.lowmem_end,
_flags: 0,
_nid: 0,
};
vmemranges.push(stub);
});
if self.highmem_end > (1u64 << 32) {
vmemranges.push(VmemRange {
start: 1u64 << 32,
end: self.highmem_end,
_flags: 0,
_nid: 0,
});
}
let mut p2m_size: u64 = 0;
let mut total: u64 = 0;
@@ -320,8 +406,6 @@ impl BootSetupPlatform for X86PvhPlatform {
}
for range in &vmemranges {
let memflags = memflags;
let end_pages = range.end >> self.page_shift();
let mut cur_pages = range.start >> self.page_shift();
@@ -334,26 +418,25 @@ impl BootSetupPlatform for X86PvhPlatform {
extents[i as usize] = cur_pages + i;
}
let _ = domain.call.populate_physmap(domain.domid, count, 0 as u32, memflags, &extents).await?;
cur_pages += count as u64;
let _ = domain
.call
.populate_physmap(domain.domid, count, 0_u32, memflags, &extents)
.await?;
cur_pages += count;
}
}
}
domain.call.claim_pages(domain.domid, 0).await?;
Ok(())
}
async fn alloc_p2m_segment(
&mut self,
_: &mut BootDomain,
) -> Result<Option<DomainSegment>> {
async fn alloc_p2m_segment(&mut self, _: &mut BootDomain) -> Result<Option<DomainSegment>> {
Ok(None)
}
async fn alloc_page_tables(
&mut self,
_: &mut BootDomain,
) -> Result<Option<DomainSegment>> {
async fn alloc_page_tables(&mut self, _: &mut BootDomain) -> Result<Option<DomainSegment>> {
Ok(None)
}
@@ -366,23 +449,81 @@ impl BootSetupPlatform for X86PvhPlatform {
}
async fn alloc_magic_pages(&mut self, domain: &mut BootDomain) -> Result<()> {
let memmap = self.construct_memmap(domain.total_pages << XEN_PAGE_SHIFT)?;
domain.call.set_memory_map(domain.domid, memmap.clone()).await?;
let memmap = self.construct_memmap()?;
let mut special_array = vec![0u64; X86_HVM_NR_SPECIAL_PAGES as usize];
for i in 0..X86_HVM_NR_SPECIAL_PAGES {
special_array[i as usize] = special_pfn(i as u32);
}
let pages = domain.call.populate_physmap(domain.domid, X86_HVM_NR_SPECIAL_PAGES, 0, 0, &special_array).await?;
println!("{:?}", pages);
domain.phys.clear_pages(special_pfn(0), X86_HVM_NR_SPECIAL_PAGES).await?;
domain.call.set_hvm_param(domain.domid, HVM_PARAM_STORE_PFN, special_pfn(SPECIALPAGE_XENSTORE)).await?;
domain.call.set_hvm_param(domain.domid, HVM_PARAM_BUFIOREQ_PFN, special_pfn(SPECIALPAGE_BUFIOREQ)).await?;
domain.call.set_hvm_param(domain.domid, HVM_PARAM_IOREQ_PFN, special_pfn(SPECIALPAGE_IOREQ)).await?;
domain.call.set_hvm_param(domain.domid, HVM_PARAM_CONSOLE_PFN, special_pfn(SPECIALPAGE_CONSOLE)).await?;
domain.call.set_hvm_param(domain.domid, HVM_PARAM_PAGING_RING_PFN, special_pfn(SPECIALPAGE_PAGING)).await?;
domain.call.set_hvm_param(domain.domid, HVM_PARAM_MONITOR_RING_PFN, special_pfn(SPECIALPAGE_ACCESS)).await?;
domain.call.set_hvm_param(domain.domid, HVM_PARAM_SHARING_RING_PFN, special_pfn(SPECIALPAGE_SHARING)).await?;
domain
.call
.populate_physmap(
domain.domid,
special_array.len() as u64,
0,
0,
&special_array,
)
.await?;
domain
.phys
.clear_pages(special_pfn(0), special_array.len() as u64)
.await?;
domain
.call
.set_hvm_param(
domain.domid,
HVM_PARAM_STORE_PFN,
special_pfn(SPECIALPAGE_XENSTORE),
)
.await?;
domain
.call
.set_hvm_param(
domain.domid,
HVM_PARAM_BUFIOREQ_PFN,
special_pfn(SPECIALPAGE_BUFIOREQ),
)
.await?;
domain
.call
.set_hvm_param(
domain.domid,
HVM_PARAM_IOREQ_PFN,
special_pfn(SPECIALPAGE_IOREQ),
)
.await?;
domain
.call
.set_hvm_param(
domain.domid,
HVM_PARAM_CONSOLE_PFN,
special_pfn(SPECIALPAGE_CONSOLE),
)
.await?;
domain
.call
.set_hvm_param(
domain.domid,
HVM_PARAM_PAGING_RING_PFN,
special_pfn(SPECIALPAGE_PAGING),
)
.await?;
domain
.call
.set_hvm_param(
domain.domid,
HVM_PARAM_MONITOR_RING_PFN,
special_pfn(SPECIALPAGE_ACCESS),
)
.await?;
domain
.call
.set_hvm_param(
domain.domid,
HVM_PARAM_SHARING_RING_PFN,
special_pfn(SPECIALPAGE_SHARING),
)
.await?;
let mut start_info_size = size_of::<HvmStartInfo>();
@@ -391,26 +532,17 @@ impl BootSetupPlatform for X86PvhPlatform {
self.start_info_segment = Some(domain.alloc_segment(0, start_info_size as u64).await?);
domain.consoles.push((0, special_pfn(SPECIALPAGE_CONSOLE)));
domain.xenstore_mfn = special_pfn(SPECIALPAGE_XENSTORE);
domain.store_mfn = special_pfn(SPECIALPAGE_XENSTORE);
Ok(())
}
async fn setup_shared_info(
&mut self,
_: &mut BootDomain,
_: u64,
) -> Result<()> {
async fn setup_shared_info(&mut self, _: &mut BootDomain, _: u64) -> Result<()> {
Ok(())
}
async fn setup_start_info(
&mut self,
_: &mut BootDomain,
_: &str,
_: u64,
) -> Result<()> {
Ok(())
async fn setup_start_info(&mut self, _: &mut BootDomain, _: &str, _: u64) -> Result<()> {
Ok(())
}
async fn bootlate(&mut self, _: &mut BootDomain) -> Result<()> {
@@ -420,9 +552,18 @@ impl BootSetupPlatform for X86PvhPlatform {
async fn vcpu(&mut self, domain: &mut BootDomain) -> Result<()> {
let size = domain.call.get_hvm_context(domain.domid, None).await?;
let mut full_context = vec![0u8; size as usize];
domain.call.get_hvm_context(domain.domid, Some(&mut full_context)).await?;
domain
.call
.get_hvm_context(domain.domid, Some(&mut full_context))
.await?;
let mut ctx: BspCtx = unsafe { MaybeUninit::zeroed().assume_init() };
unsafe { std::ptr::copy(full_context.as_ptr(), addr_of_mut!(ctx) as *mut u8, size_of::<HvmSaveDescriptor>() + size_of::<HvmSaveHeader>()) };
unsafe {
std::ptr::copy(
full_context.as_ptr(),
addr_of_mut!(ctx) as *mut u8,
size_of::<HvmSaveDescriptor>() + size_of::<HvmSaveHeader>(),
)
};
ctx.cpu_d.instance = 0;
ctx.cpu.cs_base = 0;
ctx.cpu.ds_base = 0;
@@ -466,7 +607,7 @@ impl BootSetupPlatform for X86PvhPlatform {
entries[0].frame = console_gfn as u32;
entries[1].flags = 1 << 0;
entries[1].domid = 0;
entries[1].frame = domain.xenstore_mfn as u32;
entries[1].frame = domain.store_mfn as u32;
unsafe {
let result = munmap(addr as *mut c_void, 1 << XEN_PAGE_SHIFT);
if result != 0 {
@@ -485,10 +626,12 @@ const X86_HVM_NR_SPECIAL_PAGES: u64 = 8;
const X86_HVM_END_SPECIAL_REGION: u64 = 0xff000;
const SPECIALPAGE_PAGING: u32 = 0;
const SPECIALPAGE_ACCESS: u32 = 1;
const SPECIALPAGE_SHARING: u32 = 2;
const SPECIALPAGE_BUFIOREQ: u32 = 3;
const SPECIALPAGE_XENSTORE: u32 = 4;
const SPECIALPAGE_IOREQ : u32 = 5;
const SPECIALPAGE_IDENT_PT: u32 = 6;
const SPECIALPAGE_CONSOLE: u32 = 7;
const SPECIALPAGE_IOREQ: u32 = 5;
const _SPECIALPAGE_IDENT_PT: u32 = 6;
const SPECIALPAGE_CONSOLE: u32 = 7;
const LAPIC_BASE_ADDRESS: u64 = 0xfee00000;
const ACPI_INFO_PHYSICAL_ADDRESS: u64 = 0xFC000000;
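`special_pfn` itself is not shown in this diff; based on the constants above, a plausible definition (an assumption, not the crate's confirmed code) indexes into the reserved region that `construct_memmap` marks `E820_RESERVED`:

```rust
// Assumed definition: special page i sits inside the reserved region just
// below X86_HVM_END_SPECIAL_REGION, matching the layout the constants imply.
fn special_pfn(i: u32) -> u64 {
    X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES + i as u64
}
```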