feat: rework to support multiple platforms

Alex Zenla
2024-05-05 07:27:35 -07:00
parent 2f69f339bc
commit e60fb29f52
11 changed files with 366 additions and 177 deletions
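
The thrust of the rework: XenClient and BootSetup become generic over a BootSetupPlatform, so the caller picks the platform (x86 PV or x86 PVH) instead of the client hardcoding one. A minimal usage sketch, assuming only the signatures visible in this diff:

    use xenclient::error::Result;
    use xenclient::x86pv::X86PvPlatform;
    use xenclient::XenClient;

    #[tokio::main]
    async fn main() -> Result<()> {
        // dom0 is domid 0; the platform argument is new in this commit.
        // X86PvhPlatform (xenclient::x86pvh) is the other implementor.
        let _client = XenClient::open(0, X86PvPlatform::new()).await?;
        // _client.create(&config) would then boot a domain using the flags
        // supplied by the platform's create_domain() hook shown further down.
        Ok(())
    }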

View File

@@ -7,7 +7,7 @@ use log::error;
 use loopdev::LoopControl;
 use tokio::sync::Semaphore;
 use uuid::Uuid;
-use xenclient::XenClient;
+use xenclient::{x86pv::X86PvPlatform, XenClient};
 use xenstore::{XsdClient, XsdInterface};

 use self::{
@@ -49,13 +49,13 @@ pub struct GuestInfo {
 #[derive(Clone)]
 pub struct RuntimeContext {
     pub autoloop: AutoLoop,
-    pub xen: XenClient,
+    pub xen: XenClient<X86PvPlatform>,
     pub ipvendor: IpVendor,
 }

 impl RuntimeContext {
     pub async fn new(host_uuid: Uuid) -> Result<Self> {
-        let xen = XenClient::open(0).await?;
+        let xen = XenClient::open(0, X86PvPlatform::new()).await?;
         let ipv4_network = Ipv4Network::new(Ipv4Addr::new(10, 75, 80, 0), 24)?;
         let ipv6_network = Ipv6Network::from_str("fdd4:1476:6c7e::/48")?;
         let ipvend =

View File

@@ -33,7 +33,3 @@ path = "examples/domain_create.rs"
 [[example]]
 name = "xencall-version-capabilities"
 path = "examples/version_capabilities.rs"
-
-[[example]]
-name = "xencall-vcpu-context"
-path = "examples/vcpu_context.rs"

View File

@@ -3,17 +3,33 @@ pub mod sys;
 use crate::error::{Error, Result};
 use crate::sys::{
-    AddressSize, AssignDevice, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext, EvtChnAllocUnbound, GetDomainInfo, GetPageFrameInfo3, HvmContext, HvmParam, Hypercall, HypercallInit, IoMemPermission, IoPortPermission, IrqPermission, MaxMem, MaxVcpus, MemoryMap, MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, PciAssignDevice, XenCapabilitiesInfo, DOMCTL_DEV_PCI, HYPERVISOR_DOMCTL, HYPERVISOR_EVENT_CHANNEL_OP, HYPERVISOR_HVM_OP, HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP, HYPERVISOR_MULTICALL, HYPERVISOR_XEN_VERSION, XENVER_CAPABILITIES, XEN_DOMCTL_ASSIGN_DEVICE, XEN_DOMCTL_CREATEDOMAIN, XEN_DOMCTL_DESTROYDOMAIN, XEN_DOMCTL_GETDOMAININFO, XEN_DOMCTL_GETHVMCONTEXT, XEN_DOMCTL_GETPAGEFRAMEINFO3, XEN_DOMCTL_HYPERCALL_INIT, XEN_DOMCTL_IOMEM_PERMISSION, XEN_DOMCTL_IOPORT_PERMISSION, XEN_DOMCTL_IRQ_PERMISSION, XEN_DOMCTL_MAX_MEM, XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN, XEN_DOMCTL_SETHVMCONTEXT, XEN_DOMCTL_SETVCPUCONTEXT, XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_UNPAUSEDOMAIN, XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP, XEN_MEM_POPULATE_PHYSMAP
+    AddressSize, AssignDevice, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext,
+    EvtChnAllocUnbound, GetDomainInfo, GetPageFrameInfo3, HvmContext, HvmParam, Hypercall,
+    HypercallInit, IoMemPermission, IoPortPermission, IrqPermission, MaxMem, MaxVcpus, MemoryMap,
+    MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, PagingMempool,
+    PciAssignDevice, XenCapabilitiesInfo, DOMCTL_DEV_PCI, HYPERVISOR_DOMCTL,
+    HYPERVISOR_EVENT_CHANNEL_OP, HYPERVISOR_HVM_OP, HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP,
+    HYPERVISOR_MULTICALL, HYPERVISOR_XEN_VERSION, XENVER_CAPABILITIES, XEN_DOMCTL_ASSIGN_DEVICE,
+    XEN_DOMCTL_CREATEDOMAIN, XEN_DOMCTL_DESTROYDOMAIN, XEN_DOMCTL_GETDOMAININFO,
+    XEN_DOMCTL_GETHVMCONTEXT, XEN_DOMCTL_GETPAGEFRAMEINFO3, XEN_DOMCTL_HYPERCALL_INIT,
+    XEN_DOMCTL_IOMEM_PERMISSION, XEN_DOMCTL_IOPORT_PERMISSION, XEN_DOMCTL_IRQ_PERMISSION,
+    XEN_DOMCTL_MAX_MEM, XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN, XEN_DOMCTL_SETHVMCONTEXT,
+    XEN_DOMCTL_SETVCPUCONTEXT, XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
+    XEN_DOMCTL_UNPAUSEDOMAIN, XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP, XEN_MEM_POPULATE_PHYSMAP,
 };
-use libc::{c_int, mmap, usleep, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
+use libc::{c_int, mmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
 use log::trace;
 use nix::errno::Errno;
 use std::ffi::{c_long, c_uint, c_ulong, c_void};
 use std::sync::Arc;
+use std::time::Duration;
 use sys::{
-    E820Entry, ForeignMemoryMap, PhysdevMapPirq, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP, PHYSDEVOP_MAP_PIRQ, XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION, XEN_MEM_SET_MEMORY_MAP
+    E820Entry, ForeignMemoryMap, PhysdevMapPirq, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP,
+    PHYSDEVOP_MAP_PIRQ, XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION,
+    XEN_MEM_SET_MEMORY_MAP,
 };
 use tokio::sync::Semaphore;
+use tokio::time::sleep;

 use std::fs::{File, OpenOptions};
 use std::os::fd::AsRawFd;
@@ -220,8 +236,8 @@ impl XenCall {
             num: num as u32,
             domid: domid as u16,
             addr,
-            mfns: mfns.as_mut_ptr(),
-            errors: errors.as_mut_ptr(),
+            mfns: mfns.as_mut_ptr() as u64,
+            errors: errors.as_mut_ptr() as u64,
         };

         let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
@@ -230,7 +246,7 @@ impl XenCall {
                 return Err(Error::MmapBatchFailed(errno))?;
             }

-            usleep(100);
+            sleep(Duration::from_micros(100)).await;
             let mut i: usize = 0;
             let mut paged: usize = 0;
@@ -245,8 +261,8 @@ impl XenCall {
                     num: 1,
                     domid: domid as u16,
                     addr: addr + ((i as u64) << 12),
-                    mfns: mfns.as_mut_ptr().add(i),
-                    errors: errors.as_mut_ptr().add(i),
+                    mfns: mfns.as_mut_ptr().add(i) as u64,
+                    errors: errors.as_mut_ptr().add(i) as u64,
                 };

                 loop {
@@ -558,6 +574,12 @@ impl XenCall {
         domid: u32,
         entries: Vec<E820Entry>,
     ) -> Result<Vec<E820Entry>> {
+        trace!(
+            "fd={} set_memory_map domid={} entries={:?}",
+            self.handle.as_raw_fd(),
+            domid,
+            entries
+        );
         let mut memory_map = ForeignMemoryMap {
             domid: domid as u16,
             map: MemoryMap {
@@ -594,24 +616,14 @@ impl XenCall {
             domid: domid as u16,
         };
-        let calls = &mut [MultiCallEntry {
-            op: HYPERVISOR_MEMORY_OP,
-            result: 0,
-            args: [
+        let code = self
+            .hypercall2(
+                HYPERVISOR_MEMORY_OP,
                 XEN_MEM_POPULATE_PHYSMAP as c_ulong,
                 addr_of_mut!(reservation) as c_ulong,
-                0,
-                0,
-                0,
-                0,
-            ],
-        }];
-        self.multicall(calls).await?;
-        let code = calls[0].result;
-        if code > !0xfff {
-            return Err(Error::PopulatePhysmapFailed);
-        }
-        if code as usize > extent_starts.len() {
+            )
+            .await?;
+        if code as usize != extent_starts.len() {
             return Err(Error::PopulatePhysmapFailed);
         }
         let extents = extent_starts[0..code as usize].to_vec();
@@ -811,11 +823,7 @@ impl XenCall {
         param.domid = domid as u16;
         param.index = index;
         param.value = value;
-        self.hypercall2(
-            HYPERVISOR_HVM_OP,
-            0,
-            addr_of_mut!(param) as c_ulong,
-        )
+        self.hypercall2(HYPERVISOR_HVM_OP, 0, addr_of_mut!(param) as c_ulong)
             .await?;
         Ok(())
     }
@@ -834,7 +842,7 @@ impl XenCall {
                 hvm_context: HvmContext {
                     size: buffer.as_ref().map(|x| x.len()).unwrap_or(0) as u32,
                     buffer: buffer.map(|x| x.as_mut_ptr()).unwrap_or(null_mut()) as u64,
-                }
+                },
             },
         };
         self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
@@ -856,11 +864,31 @@ impl XenCall {
                 hvm_context: HvmContext {
                     size: buffer.len() as u32,
                     buffer: buffer.as_ptr() as u64,
-                }
+                },
             },
         };
         self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
             .await?;
         Ok(unsafe { domctl.value.hvm_context.size })
     }
+
+    pub async fn set_paging_mempool_size(&self, domid: u32, size: u64) -> Result<()> {
+        trace!(
+            "domctl fd={} set_paging_mempool_size domid={} size={}",
+            self.handle.as_raw_fd(),
+            domid,
+            size,
+        );
+        let mut domctl = DomCtl {
+            cmd: XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
+            interface_version: self.domctl_interface_version,
+            domid,
+            value: DomCtlValue {
+                paging_mempool: PagingMempool { size },
+            },
+        };
+        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
+            .await?;
+        Ok(())
+    }
 }

View File

@@ -35,8 +35,8 @@ pub struct MmapBatch {
     pub num: u32,
     pub domid: u16,
     pub addr: u64,
-    pub mfns: *mut u64,
-    pub errors: *mut c_int,
+    pub mfns: u64,
+    pub errors: u64,
 }

 #[repr(C)]
@@ -200,6 +200,7 @@ pub const XEN_DOMCTL_PSR_CAT_OP: u32 = 78;
 pub const XEN_DOMCTL_SOFT_RESET: u32 = 79;
 pub const XEN_DOMCTL_SET_GNTTAB_LIMITS: u32 = 80;
 pub const XEN_DOMCTL_VUART_OP: u32 = 81;
+pub const XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE: u32 = 86;
 pub const XEN_DOMCTL_GDBSX_GUESTMEMIO: u32 = 1000;
 pub const XEN_DOMCTL_GDBSX_PAUSEVCPU: u32 = 1001;
 pub const XEN_DOMCTL_GDBSX_UNPAUSEVCPU: u32 = 1002;
@@ -243,6 +244,7 @@ pub union DomCtlValue {
     pub irq_permission: IrqPermission,
     pub assign_device: AssignDevice,
     pub hvm_context: HvmContext,
+    pub paging_mempool: PagingMempool,
     pub pad: [u8; 128],
 }
@@ -270,8 +272,6 @@ impl Default for CreateDomain {
             handle: Uuid::new_v4().into_bytes(),
             #[cfg(target_arch = "x86_64")]
             flags: 0,
-            #[cfg(target_arch = "aarch64")]
-            flags: 1 << XEN_DOMCTL_CDF_HVM_GUEST,
             iommu_opts: 0,
             max_vcpus: 1,
             max_evtchn_port: 1023,
@@ -698,3 +698,9 @@ pub struct HvmContext {
     pub size: u32,
     pub buffer: u64,
 }
+
+#[repr(C)]
+#[derive(Clone, Copy, Debug)]
+pub struct PagingMempool {
+    pub size: u64,
+}
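
The new domctl above is what the PVH boot path uses to size the paging (HAP) memory pool before populating the physmap. A hedged sketch of driving it through XenCall — it assumes xencall exposes an error::Result the same way xenclient does:

    use xencall::error::Result; // assumed path, mirroring xenclient's layout
    use xencall::XenCall;

    async fn grow_paging_pool(call: &XenCall, domid: u32) -> Result<()> {
        // 1024 pages of 4 KiB each (1024 << 12 bytes): the value the x86 PVH
        // platform passes from initialize_early later in this commit.
        call.set_paging_mempool_size(domid, 1024 << 12).await?;
        Ok(())
    }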

View File

@@ -1,6 +1,7 @@
 use std::{env, process};
 use tokio::fs;
 use xenclient::error::Result;
+use xenclient::x86pvh::X86PvhPlatform;
 use xenclient::{DomainConfig, XenClient};

 #[tokio::main]
@@ -14,7 +15,7 @@ async fn main() -> Result<()> {
     }
     let kernel_image_path = args.get(1).expect("argument not specified");
     let initrd_path = args.get(2).expect("argument not specified");
-    let client = XenClient::open(0).await?;
+    let client = XenClient::new(0, X86PvhPlatform::new()).await?;
     let config = DomainConfig {
         backend_domid: 0,
         name: "xenclient-test".to_string(),

View File

@@ -2,7 +2,7 @@ use std::slice;
 use log::debug;
 use slice_copy::copy;
-use xencall::XenCall;
+use xencall::{sys::CreateDomain, XenCall};

 use crate::{
     error::{Error, Result},
@@ -18,7 +18,7 @@ pub struct BootSetup<I: BootImageLoader, P: BootSetupPlatform> {
     pub dtb: Option<Vec<u8>>,
 }

-#[derive(Debug, Default)]
+#[derive(Debug, Default, Clone)]
 pub struct DomainSegment {
     pub vstart: u64,
     pub vend: u64,
@@ -40,7 +40,7 @@ pub struct BootDomain {
     pub image_info: BootImageInfo,
     pub phys: PhysicalPages,
     pub store_evtchn: u32,
-    pub xenstore_mfn: u64,
+    pub store_mfn: u64,
     pub initrd_segment: DomainSegment,
     pub consoles: Vec<(u32, u64)>,
 }
@@ -174,9 +174,11 @@ impl<I: BootImageLoader, P: BootSetupPlatform> BootSetup<I, P> {
             phys: PhysicalPages::new(self.call.clone(), self.domid, self.platform.page_shift()),
             initrd_segment: DomainSegment::default(),
             store_evtchn: 0,
-            xenstore_mfn: 0,
+            store_mfn: 0,
         };

+        self.platform.initialize_early(&mut domain).await?;
+
         let mut initrd_segment = if !domain.image_info.unmapped_initrd {
             Some(domain.alloc_module(initrd).await?)
         } else {
@@ -248,12 +250,15 @@ impl<I: BootImageLoader, P: BootSetupPlatform> BootSetup<I, P> {
 }

 #[async_trait::async_trait]
-pub trait BootSetupPlatform {
+pub trait BootSetupPlatform: Clone {
+    fn create_domain(&self) -> CreateDomain;
     fn page_size(&self) -> u64;
     fn page_shift(&self) -> u64;
     fn needs_early_kernel(&self) -> bool;

-    async fn initialize_memory(&self, domain: &mut BootDomain) -> Result<()>;
+    async fn initialize_early(&mut self, domain: &mut BootDomain) -> Result<()>;
+
+    async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()>;

     async fn alloc_page_tables(&mut self, domain: &mut BootDomain)
         -> Result<Option<DomainSegment>>;

View File

@@ -16,12 +16,10 @@ use tokio::time::timeout;
 use std::path::PathBuf;
 use std::str::FromStr;
+use std::sync::Arc;
 use std::time::Duration;
 use uuid::Uuid;
-use xencall::sys::{
-    CreateDomain, DOMCTL_DEV_RDM_RELAXED, XEN_DOMCTL_CDF_HAP, XEN_DOMCTL_CDF_HVM_GUEST,
-    XEN_DOMCTL_CDF_IOMMU, XEN_X86_EMU_LAPIC,
-};
+use xencall::sys::{CreateDomain, DOMCTL_DEV_RDM_RELAXED};
 use xencall::XenCall;
 use xenstore::{
     XsPermission, XsdClient, XsdInterface, XsdTransaction, XS_PERM_NONE, XS_PERM_READ,
@@ -33,9 +31,10 @@ pub mod x86pv;
 pub mod x86pvh;

 #[derive(Clone)]
-pub struct XenClient {
+pub struct XenClient<P: BootSetupPlatform> {
     pub store: XsdClient,
     call: XenCall,
+    platform: Arc<P>,
 }

 #[derive(Clone, Debug)]
@@ -141,23 +140,16 @@ impl XenClient {
     pub async fn open(current_domid: u32) -> Result<XenClient> {
         let store = XsdClient::open().await?;
         let call = XenCall::open(current_domid)?;
-        Ok(XenClient { store, call })
+        Ok(XenClient {
+            store,
+            call,
+            platform: Arc::new(platform),
+        })
     }

     pub async fn create(&self, config: &DomainConfig) -> Result<CreatedDomain> {
-        let mut domain = CreateDomain {
-            max_vcpus: config.max_vcpus,
-            ..Default::default()
-        };
+        let mut domain = self.platform.create_domain();
         domain.max_vcpus = config.max_vcpus;
-
-        if cfg!(target_arch = "aarch64") {
-            domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP;
-        } else {
-            domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP | XEN_DOMCTL_CDF_IOMMU;
-            domain.arch_domain_config.emulation_flags = XEN_X86_EMU_LAPIC;
-        }
-
         let domid = self.call.create_domain(domain).await?;
         match self.init(domid, &domain, config).await {
             Ok(created) => Ok(created),
@@ -173,18 +165,18 @@
     async fn init(
         &self,
         domid: u32,
-        domain: &CreateDomain,
+        created: &CreateDomain,
         config: &DomainConfig,
     ) -> Result<CreatedDomain> {
         trace!(
             "XenClient init domid={} domain={:?} config={:?}",
             domid,
-            domain,
+            created,
             config
         );
         let backend_dom_path = self.store.get_domain_path(0).await?;
         let dom_path = self.store.get_domain_path(domid).await?;
-        let uuid_string = Uuid::from_bytes(domain.handle).to_string();
+        let uuid_string = Uuid::from_bytes(created.handle).to_string();
         let vm_path = format!("/vm/{}", uuid_string);

         let ro_perm = &[
@@ -265,7 +257,7 @@
         tx.write_string(
             format!("{}/uuid", vm_path).as_str(),
-            &Uuid::from_bytes(domain.handle).to_string(),
+            &Uuid::from_bytes(created.handle).to_string(),
         )
         .await?;
         tx.write_string(format!("{}/name", dom_path).as_str(), &config.name)
@@ -287,19 +279,16 @@
         }

         self.call.set_max_vcpus(domid, config.max_vcpus).await?;
-        self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
-        let xenstore_evtchn: u32;
-        let xenstore_mfn: u64;
+        self.call
+            .set_max_mem(domid, (config.mem_mb * 1024) + 1024)
+            .await?;
         let mut domain: BootDomain;
         {
             let loader = ElfImageLoader::load_file_kernel(&config.kernel)?;
-            let mut boot =
-                BootSetup::new(self.call.clone(), domid, X86PvhPlatform::new(), loader, None);
+            let platform = (*self.platform).clone();
+            let mut boot = BootSetup::new(self.call.clone(), domid, platform, loader, None);
             domain = boot.initialize(&config.initrd, config.mem_mb).await?;
             boot.boot(&mut domain, &config.cmdline).await?;
-            xenstore_evtchn = domain.store_evtchn;
-            xenstore_mfn = domain.xenstore_mfn;
         }

         {
@@ -330,12 +319,12 @@
             .await?;
         tx.write_string(
             format!("{}/store/port", dom_path).as_str(),
-            &xenstore_evtchn.to_string(),
+            &domain.store_evtchn.to_string(),
         )
         .await?;
         tx.write_string(
             format!("{}/store/ring-ref", dom_path).as_str(),
-            &xenstore_mfn.to_string(),
+            &domain.store_mfn.to_string(),
         )
         .await?;
         for i in 0..config.max_vcpus {
@@ -350,7 +339,7 @@
         }
         if !self
             .store
-            .introduce_domain(domid, xenstore_mfn, xenstore_evtchn)
+            .introduce_domain(domid, domain.store_mfn, domain.store_evtchn)
             .await?
         {
             return Err(Error::IntroduceDomainFailed);

View File

@@ -1,7 +1,7 @@
 use crate::error::Result;
-use crate::sys::{XEN_PAGE_SHIFT, XEN_PAGE_SIZE};
+use crate::sys::XEN_PAGE_SHIFT;
 use crate::Error;
-use libc::{memset, munmap};
+use libc::munmap;
 use log::debug;
 use nix::errno::Errno;
 use std::ffi::c_void;
@@ -124,11 +124,20 @@
         Ok(addr)
     }

-    pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<PhysicalPage> {
-        let num = (size >> XEN_PAGE_SHIFT) as usize;
+    pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
+        let count = (size >> XEN_PAGE_SHIFT) as usize;
+        let mut entries = vec![MmapEntry::default(); count];
+        for (i, entry) in entries.iter_mut().enumerate() {
+            entry.mfn = mfn + i as u64;
+        }
+        let chunk_size = 1 << XEN_PAGE_SHIFT;
+        let num_per_entry = chunk_size >> XEN_PAGE_SHIFT;
+        let num = num_per_entry * count;
         let mut pfns = vec![u64::MAX; num];
-        for (i, item) in pfns.iter_mut().enumerate().take(num) {
-            *item = mfn + i as u64;
+        for i in 0..count {
+            for j in 0..num_per_entry {
+                pfns[i * num_per_entry + j] = entries[i].mfn + j as u64;
+            }
         }

         let actual_mmap_len = (num as u64) << XEN_PAGE_SHIFT;
@@ -148,24 +157,21 @@
         let page = PhysicalPage {
             pfn: mfn,
             ptr: addr,
-            count: num as u64,
+            count: count as u64,
         };
         debug!(
             "alloc_mfn {:#x}+{:#x} at {:#x}",
             page.pfn, page.count, page.ptr
         );
-        self.pages.push(page.clone());
-        Ok(page)
+        self.pages.push(page);
+        Ok(addr)
     }

     pub async fn clear_pages(&mut self, pfn: u64, count: u64) -> Result<()> {
-        let mfn = if !self.p2m.is_empty() {
-            self.p2m[pfn as usize]
-        } else {
-            pfn
-        };
-        let page = self.map_foreign_pages(mfn, count << XEN_PAGE_SHIFT).await?;
-        let slice = unsafe { slice::from_raw_parts_mut(page.ptr as *mut u8, (count << XEN_PAGE_SHIFT) as usize) };
+        let ptr = self.pfn_to_ptr(pfn, count).await?;
+        let slice = unsafe {
+            slice::from_raw_parts_mut(ptr as *mut u8, (count * (1 << self.page_shift)) as usize)
+        };
         slice.fill(0);
         Ok(())
     }

View File

@@ -142,3 +142,5 @@ pub const HVM_PARAM_CONSOLE_EVTCHN: u32 = 8;
 pub const HVM_PARAM_PAGING_RING_PFN: u32 = 27;
 pub const HVM_PARAM_MONITOR_RING_PFN: u32 = 28;
 pub const HVM_PARAM_SHARING_RING_PFN: u32 = 29;
+pub const HVM_PARAM_TIMER_MODE: u32 = 10;
+pub const HVM_PARAM_ALTP2M: u32 = 35;

View File

@@ -9,7 +9,8 @@ use log::{debug, trace};
 use nix::errno::Errno;
 use slice_copy::copy;
 use xencall::sys::{
-    x8664VcpuGuestContext, E820Entry, VcpuGuestContextAny, E820_MAX, E820_RAM, E820_UNUSABLE, MMUEXT_PIN_L4_TABLE
+    x8664VcpuGuestContext, CreateDomain, E820Entry, VcpuGuestContextAny, E820_MAX, E820_RAM,
+    E820_UNUSABLE, MMUEXT_PIN_L4_TABLE, XEN_DOMCTL_CDF_IOMMU,
 };

 use crate::{
@@ -141,7 +142,7 @@ struct VmemRange {
     _nid: u32,
 }

-#[derive(Default)]
+#[derive(Default, Clone)]
 pub struct X86PvPlatform {
     table: PageTable,
     p2m_segment: Option<DomainSegment>,
@@ -433,6 +434,13 @@ impl X86PvPlatform {
 #[async_trait::async_trait]
 impl BootSetupPlatform for X86PvPlatform {
+    fn create_domain(&self) -> CreateDomain {
+        CreateDomain {
+            flags: XEN_DOMCTL_CDF_IOMMU,
+            ..Default::default()
+        }
+    }
+
     fn page_size(&self) -> u64 {
         X86_PAGE_SIZE
     }
@@ -445,7 +453,11 @@
         false
     }

-    async fn initialize_memory(&self, domain: &mut BootDomain) -> Result<()> {
+    async fn initialize_early(&mut self, _: &mut BootDomain) -> Result<()> {
+        Ok(())
+    }
+
+    async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()> {
         domain.call.set_address_size(domain.domid, 64).await?;
         domain
             .call
@@ -695,7 +707,7 @@
         }

         self.start_info_segment = Some(domain.alloc_page()?);
         self.xenstore_segment = Some(domain.alloc_page()?);
-        domain.xenstore_mfn = domain.phys.p2m[self.xenstore_segment.as_ref().unwrap().pfn as usize];
+        domain.store_mfn = domain.phys.p2m[self.xenstore_segment.as_ref().unwrap().pfn as usize];
         let evtchn = domain.call.evtchn_alloc_unbound(domain.domid, 0).await?;
         let page = domain.alloc_page()?;
         domain
@@ -726,7 +738,7 @@
         let info = domain
             .phys
             .map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)
-            .await?.ptr as *mut SharedInfo;
+            .await? as *mut SharedInfo;
         unsafe {
             let size = size_of::<SharedInfo>();
             let info_as_buff = slice::from_raw_parts_mut(info as *mut u8, size);
@@ -861,9 +873,10 @@
         vcpu.kernel_ss = vcpu.user_regs.ss as u64;
         vcpu.kernel_sp = vcpu.user_regs.rsp;
         trace!("vcpu context: {:?}", vcpu);
-        domain.call.set_vcpu_context(domain.domid, 0, VcpuGuestContextAny {
-            value: vcpu,
-        }).await?;
+        domain
+            .call
+            .set_vcpu_context(domain.domid, 0, VcpuGuestContextAny { value: vcpu })
+            .await?;
         Ok(())
     }

View File

@@ -1,19 +1,25 @@
 use std::{
-    mem::{size_of, MaybeUninit}, os::raw::{c_char, c_void}, ptr::addr_of_mut, slice
+    mem::{size_of, MaybeUninit},
+    os::raw::{c_char, c_void},
+    ptr::addr_of_mut,
+    slice,
 };

 use libc::munmap;
+use log::trace;
 use nix::errno::Errno;
 use xencall::sys::{
-    x8664VcpuGuestContext, E820Entry, E820_RAM, MEMFLAGS_POPULATE_ON_DEMAND
+    ArchDomainConfig, CreateDomain, E820Entry, E820_RAM, E820_RESERVED,
+    MEMFLAGS_POPULATE_ON_DEMAND, XEN_DOMCTL_CDF_HAP, XEN_DOMCTL_CDF_HVM_GUEST,
+    XEN_DOMCTL_CDF_IOMMU, XEN_X86_EMU_LAPIC,
 };

 use crate::{
     boot::{BootDomain, BootSetupPlatform, DomainSegment},
     error::{Error, Result},
     sys::{
-        GrantEntry, HVM_PARAM_BUFIOREQ_PFN, HVM_PARAM_CONSOLE_PFN, HVM_PARAM_IOREQ_PFN, HVM_PARAM_MONITOR_RING_PFN, HVM_PARAM_PAGING_RING_PFN, HVM_PARAM_SHARING_RING_PFN, HVM_PARAM_STORE_PFN, SUPERPAGE_1GB_NR_PFNS, SUPERPAGE_1GB_SHIFT, SUPERPAGE_2MB_NR_PFNS, SUPERPAGE_2MB_SHIFT, SUPERPAGE_BATCH_SIZE, VGCF_IN_KERNEL, VGCF_ONLINE, XEN_PAGE_SHIFT
+        GrantEntry, HVM_PARAM_ALTP2M, HVM_PARAM_BUFIOREQ_PFN, HVM_PARAM_CONSOLE_PFN,
+        HVM_PARAM_IOREQ_PFN, HVM_PARAM_MONITOR_RING_PFN, HVM_PARAM_PAGING_RING_PFN,
+        HVM_PARAM_SHARING_RING_PFN, HVM_PARAM_STORE_PFN, HVM_PARAM_TIMER_MODE, XEN_PAGE_SHIFT,
     },
 };
@@ -240,9 +246,12 @@ struct VmemRange {
     _nid: u32,
 }

-#[derive(Default)]
+#[derive(Default, Clone)]
 pub struct X86PvhPlatform {
     start_info_segment: Option<DomainSegment>,
+    lowmem_end: u64,
+    highmem_end: u64,
+    mmio_start: u64,
 }

 const X86_CR0_PE: u64 = 0x01;
@@ -255,19 +264,35 @@
         }
     }

-    pub fn construct_memmap(&self, mem_size_bytes: u64) -> Result<Vec<E820Entry>> {
-        let entries = vec![
-            E820Entry {
-                addr: 0,
-                size: mem_size_bytes,
-                typ: E820_RAM
-            },
-            E820Entry {
+    pub fn construct_memmap(&self) -> Result<Vec<E820Entry>> {
+        let mut entries = Vec::new();
+
+        let highmem_size = if self.highmem_end > 0 {
+            self.highmem_end - (1u64 << 32)
+        } else {
+            0
+        };
+        let lowmem_start = 0u64;
+        entries.push(E820Entry {
+            addr: lowmem_start,
+            size: self.lowmem_end - lowmem_start,
+            typ: E820_RAM,
+        });
+        entries.push(E820Entry {
             addr: (X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES) << XEN_PAGE_SHIFT,
             size: X86_HVM_NR_SPECIAL_PAGES << XEN_PAGE_SHIFT,
-                typ: E820_RAM
-            },
-        ];
+            typ: E820_RESERVED,
+        });
+
+        if highmem_size > 0 {
+            entries.push(E820Entry {
+                addr: 1u64 << 32,
+                size: highmem_size,
+                typ: E820_RAM,
+            });
+        }
+
         Ok(entries)
     }
@@ -280,6 +305,17 @@
 #[async_trait::async_trait]
 impl BootSetupPlatform for X86PvhPlatform {
+    fn create_domain(&self) -> CreateDomain {
+        CreateDomain {
+            flags: XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP | XEN_DOMCTL_CDF_IOMMU,
+            arch_domain_config: ArchDomainConfig {
+                emulation_flags: XEN_X86_EMU_LAPIC,
+                ..Default::default()
+            },
+            ..Default::default()
+        }
+    }
+
     fn page_size(&self) -> u64 {
         X86_PAGE_SIZE
     }
@@ -292,7 +328,49 @@
         false
     }

-    async fn initialize_memory(&self, domain: &mut BootDomain) -> Result<()> {
+    async fn initialize_early(&mut self, domain: &mut BootDomain) -> Result<()> {
+        let mut memory_start =
+            (X86_HVM_END_SPECIAL_REGION - X86_HVM_NR_SPECIAL_PAGES) << self.page_shift();
+        memory_start = memory_start.min(LAPIC_BASE_ADDRESS);
+        memory_start = memory_start.min(ACPI_INFO_PHYSICAL_ADDRESS);
+        let mmio_size = (4 * 1024 * 1024 * 1024) - memory_start;
+        let mut lowmem_end = domain.total_pages << self.page_shift();
+        let mut highmem_end = 0u64;
+        let mmio_start = (1u64 << 32) - mmio_size;
+
+        if lowmem_end > mmio_start {
+            highmem_end = (1 << 32) + (lowmem_end - mmio_start);
+            lowmem_end = mmio_start;
+        }
+
+        self.lowmem_end = lowmem_end;
+        self.highmem_end = highmem_end;
+        self.mmio_start = mmio_start;
+
+        domain
+            .call
+            .set_hvm_param(domain.domid, HVM_PARAM_TIMER_MODE, 1)
+            .await?;
+        domain
+            .call
+            .set_hvm_param(domain.domid, HVM_PARAM_ALTP2M, 0)
+            .await?;
+        domain
+            .call
+            .set_paging_mempool_size(domain.domid, 1024 << 12)
+            .await?;
+        let memmap = self.construct_memmap()?;
+        domain
+            .call
+            .set_memory_map(domain.domid, memmap.clone())
+            .await?;
+        Ok(())
+    }
+
+    async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()> {
+        domain
+            .call
+            .claim_pages(domain.domid, domain.total_pages)
+            .await?;
         let memflags = if domain.target_pages > domain.total_pages {
             MEMFLAGS_POPULATE_ON_DEMAND
         } else {
@@ -300,13 +378,21 @@
         };

         let mut vmemranges: Vec<VmemRange> = Vec::new();
-        let stub = VmemRange {
+        vmemranges.push(VmemRange {
             start: 0,
-            end: domain.total_pages << self.page_shift(),
+            end: self.lowmem_end,
             _flags: 0,
             _nid: 0,
-        };
-        vmemranges.push(stub);
+        });
+
+        if self.highmem_end > (1u64 << 32) {
+            vmemranges.push(VmemRange {
+                start: 1u64 << 32,
+                end: self.highmem_end,
+                _flags: 0,
+                _nid: 0,
+            });
+        }

         let mut p2m_size: u64 = 0;
         let mut total: u64 = 0;
@@ -320,8 +406,6 @@
         }

         for range in &vmemranges {
-            let memflags = memflags;
-
             let end_pages = range.end >> self.page_shift();
             let mut cur_pages = range.start >> self.page_shift();
@@ -334,26 +418,25 @@
                         extents[i as usize] = cur_pages + i;
                     }

-                    let _ = domain.call.populate_physmap(domain.domid, count, 0 as u32, memflags, &extents).await?;
-                    cur_pages += count as u64;
+                    let _ = domain
+                        .call
+                        .populate_physmap(domain.domid, count, 0_u32, memflags, &extents)
+                        .await?;
+                    cur_pages += count;
                 }
             }
         }

-        domain.call.claim_pages(domain.domid, 0).await?;
         Ok(())
     }

-    async fn alloc_p2m_segment(
-        &mut self,
-        _: &mut BootDomain,
-    ) -> Result<Option<DomainSegment>> {
+    async fn alloc_p2m_segment(&mut self, _: &mut BootDomain) -> Result<Option<DomainSegment>> {
         Ok(None)
     }

-    async fn alloc_page_tables(
-        &mut self,
-        _: &mut BootDomain,
-    ) -> Result<Option<DomainSegment>> {
+    async fn alloc_page_tables(&mut self, _: &mut BootDomain) -> Result<Option<DomainSegment>> {
         Ok(None)
     }
@@ -366,23 +449,81 @@
     }

     async fn alloc_magic_pages(&mut self, domain: &mut BootDomain) -> Result<()> {
-        let memmap = self.construct_memmap(domain.total_pages << XEN_PAGE_SHIFT)?;
-        domain.call.set_memory_map(domain.domid, memmap.clone()).await?;
+        let memmap = self.construct_memmap()?;

         let mut special_array = vec![0u64; X86_HVM_NR_SPECIAL_PAGES as usize];
         for i in 0..X86_HVM_NR_SPECIAL_PAGES {
             special_array[i as usize] = special_pfn(i as u32);
         }
-        let pages = domain.call.populate_physmap(domain.domid, X86_HVM_NR_SPECIAL_PAGES, 0, 0, &special_array).await?;
-        println!("{:?}", pages);
-        domain.phys.clear_pages(special_pfn(0), X86_HVM_NR_SPECIAL_PAGES).await?;
-        domain.call.set_hvm_param(domain.domid, HVM_PARAM_STORE_PFN, special_pfn(SPECIALPAGE_XENSTORE)).await?;
-        domain.call.set_hvm_param(domain.domid, HVM_PARAM_BUFIOREQ_PFN, special_pfn(SPECIALPAGE_BUFIOREQ)).await?;
-        domain.call.set_hvm_param(domain.domid, HVM_PARAM_IOREQ_PFN, special_pfn(SPECIALPAGE_IOREQ)).await?;
-        domain.call.set_hvm_param(domain.domid, HVM_PARAM_CONSOLE_PFN, special_pfn(SPECIALPAGE_CONSOLE)).await?;
-        domain.call.set_hvm_param(domain.domid, HVM_PARAM_PAGING_RING_PFN, special_pfn(SPECIALPAGE_PAGING)).await?;
-        domain.call.set_hvm_param(domain.domid, HVM_PARAM_MONITOR_RING_PFN, special_pfn(SPECIALPAGE_ACCESS)).await?;
-        domain.call.set_hvm_param(domain.domid, HVM_PARAM_SHARING_RING_PFN, special_pfn(SPECIALPAGE_SHARING)).await?;
+        domain
+            .call
+            .populate_physmap(
+                domain.domid,
+                special_array.len() as u64,
+                0,
+                0,
+                &special_array,
+            )
+            .await?;
+        domain
+            .phys
+            .clear_pages(special_pfn(0), special_array.len() as u64)
+            .await?;
+        domain
+            .call
+            .set_hvm_param(
+                domain.domid,
+                HVM_PARAM_STORE_PFN,
+                special_pfn(SPECIALPAGE_XENSTORE),
+            )
+            .await?;
+        domain
+            .call
+            .set_hvm_param(
+                domain.domid,
+                HVM_PARAM_BUFIOREQ_PFN,
+                special_pfn(SPECIALPAGE_BUFIOREQ),
+            )
+            .await?;
+        domain
+            .call
+            .set_hvm_param(
+                domain.domid,
+                HVM_PARAM_IOREQ_PFN,
+                special_pfn(SPECIALPAGE_IOREQ),
+            )
+            .await?;
+        domain
+            .call
+            .set_hvm_param(
+                domain.domid,
+                HVM_PARAM_CONSOLE_PFN,
+                special_pfn(SPECIALPAGE_CONSOLE),
+            )
+            .await?;
+        domain
+            .call
+            .set_hvm_param(
+                domain.domid,
+                HVM_PARAM_PAGING_RING_PFN,
+                special_pfn(SPECIALPAGE_PAGING),
+            )
+            .await?;
+        domain
+            .call
+            .set_hvm_param(
+                domain.domid,
+                HVM_PARAM_MONITOR_RING_PFN,
+                special_pfn(SPECIALPAGE_ACCESS),
+            )
+            .await?;
+        domain
+            .call
+            .set_hvm_param(
+                domain.domid,
+                HVM_PARAM_SHARING_RING_PFN,
+                special_pfn(SPECIALPAGE_SHARING),
+            )
+            .await?;

         let mut start_info_size = size_of::<HvmStartInfo>();
@@ -391,25 +532,16 @@
         self.start_info_segment = Some(domain.alloc_segment(0, start_info_size as u64).await?);

         domain.consoles.push((0, special_pfn(SPECIALPAGE_CONSOLE)));
-        domain.xenstore_mfn = special_pfn(SPECIALPAGE_XENSTORE);
+        domain.store_mfn = special_pfn(SPECIALPAGE_XENSTORE);
         Ok(())
     }

-    async fn setup_shared_info(
-        &mut self,
-        _: &mut BootDomain,
-        _: u64,
-    ) -> Result<()> {
+    async fn setup_shared_info(&mut self, _: &mut BootDomain, _: u64) -> Result<()> {
         Ok(())
     }

-    async fn setup_start_info(
-        &mut self,
-        _: &mut BootDomain,
-        _: &str,
-        _: u64,
-    ) -> Result<()> {
+    async fn setup_start_info(&mut self, _: &mut BootDomain, _: &str, _: u64) -> Result<()> {
         Ok(())
     }
@@ -420,9 +552,18 @@
     async fn vcpu(&mut self, domain: &mut BootDomain) -> Result<()> {
         let size = domain.call.get_hvm_context(domain.domid, None).await?;
         let mut full_context = vec![0u8; size as usize];
-        domain.call.get_hvm_context(domain.domid, Some(&mut full_context)).await?;
+        domain
+            .call
+            .get_hvm_context(domain.domid, Some(&mut full_context))
+            .await?;
         let mut ctx: BspCtx = unsafe { MaybeUninit::zeroed().assume_init() };
-        unsafe { std::ptr::copy(full_context.as_ptr(), addr_of_mut!(ctx) as *mut u8, size_of::<HvmSaveDescriptor>() + size_of::<HvmSaveHeader>()) };
+        unsafe {
+            std::ptr::copy(
+                full_context.as_ptr(),
+                addr_of_mut!(ctx) as *mut u8,
+                size_of::<HvmSaveDescriptor>() + size_of::<HvmSaveHeader>(),
+            )
+        };
         ctx.cpu_d.instance = 0;
         ctx.cpu.cs_base = 0;
         ctx.cpu.ds_base = 0;
@@ -466,7 +607,7 @@
         entries[0].frame = console_gfn as u32;
         entries[1].flags = 1 << 0;
         entries[1].domid = 0;
-        entries[1].frame = domain.xenstore_mfn as u32;
+        entries[1].frame = domain.store_mfn as u32;
         unsafe {
             let result = munmap(addr as *mut c_void, 1 << XEN_PAGE_SHIFT);
             if result != 0 {
@@ -490,5 +631,7 @@ const SPECIALPAGE_SHARING: u32 = 2;
 const SPECIALPAGE_BUFIOREQ: u32 = 3;
 const SPECIALPAGE_XENSTORE: u32 = 4;
 const SPECIALPAGE_IOREQ: u32 = 5;
-const SPECIALPAGE_IDENT_PT: u32 = 6;
+const _SPECIALPAGE_IDENT_PT: u32 = 6;
 const SPECIALPAGE_CONSOLE: u32 = 7;
+const LAPIC_BASE_ADDRESS: u64 = 0xfee00000;
+const ACPI_INFO_PHYSICAL_ADDRESS: u64 = 0xFC000000;
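
For intuition on the initialize_early split above: guest RAM that would collide with the MMIO hole below 4 GiB is relocated above it, which construct_memmap then reports as a second E820_RAM entry. A worked sketch for a 6 GiB guest, assuming memory_start collapses to ACPI_INFO_PHYSICAL_ADDRESS (0xFC000000), i.e. the special-page region sits above that address:

    fn main() {
        // min(special region start, LAPIC_BASE_ADDRESS, ACPI_INFO_PHYSICAL_ADDRESS)
        let memory_start: u64 = 0xFC00_0000;
        let mmio_size = (4u64 << 30) - memory_start; // 64 MiB hole below 4 GiB
        let mmio_start = (1u64 << 32) - mmio_size; // == 0xFC00_0000
        let mut lowmem_end: u64 = 6 << 30; // 6 GiB of guest RAM
        let mut highmem_end = 0u64;
        if lowmem_end > mmio_start {
            // RAM overlapping the hole moves above the 4 GiB boundary.
            highmem_end = (1u64 << 32) + (lowmem_end - mmio_start);
            lowmem_end = mmio_start;
        }
        assert_eq!(lowmem_end, 0xFC00_0000); // low E820_RAM entry ends here
        assert_eq!(highmem_end, 0x1_8400_0000); // high E820_RAM entry ends here
    }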