mirror of https://github.com/edera-dev/krata.git (synced 2025-08-03 13:11:31 +00:00)
krata: work on parallel reconciliation
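
This change converts the xenclient boot path from blocking hypercall wrappers to async: the Cargo manifest gains the async-trait crate, the ArchBootSetup trait and its x86/arm64 implementations are annotated with #[async_trait::async_trait], and callers now drive the arch-specific code through a Box<dyn ArchBootSetup + Send + Sync>. A minimal, self-contained sketch of that pattern follows (hypothetical trait and types, not the actual krata signatures; assumes the async-trait and tokio crates are available):

use async_trait::async_trait;

// Hypothetical, trimmed-down trait mirroring the shape of the change: methods that
// were synchronous become `async fn`, and callers hold the implementation behind a
// boxed trait object that is Send + Sync so it can live across .await points.
#[async_trait]
trait ArchSetup {
    async fn meminit(&mut self, total_pages: u64) -> Result<(), String>;
}

struct DummyArch;

#[async_trait]
impl ArchSetup for DummyArch {
    async fn meminit(&mut self, total_pages: u64) -> Result<(), String> {
        // Stand-in for the real hypercall-driven work.
        println!("claiming {} pages", total_pages);
        Ok(())
    }
}

async fn boot(arch: &mut Box<dyn ArchSetup + Send + Sync>) -> Result<(), String> {
    arch.meminit(1024).await
}

#[tokio::main]
async fn main() {
    let mut arch = Box::new(DummyArch) as Box<dyn ArchSetup + Send + Sync>;
    boot(&mut arch).await.unwrap();
}
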
@@ -9,6 +9,7 @@ edition = "2021"
resolver = "2"

[dependencies]
async-trait = { workspace = true }
elf = { workspace = true }
flate2 = { workspace = true }
libc = { workspace = true }
@@ -16,6 +17,7 @@ log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.3" }
krata-xenstore = { path = "../xenstore", version = "^0.0.3" }
memchr = { workspace = true }
nix = { workspace = true }
slice-copy = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
@@ -24,6 +26,7 @@ xz2 = { workspace = true }

[dev-dependencies]
env_logger = { workspace = true }
tokio = { workspace = true }

[lib]
name = "xenclient"

@@ -13,7 +13,7 @@ async fn main() -> Result<()> {
    }
    let kernel_image_path = args.get(1).expect("argument not specified");
    let initrd_path = args.get(2).expect("argument not specified");
    let mut client = XenClient::open(0).await?;
    let client = XenClient::open(0).await?;
    let config = DomainConfig {
        backend_domid: 0,
        name: "xenclient-test",

@@ -41,9 +41,9 @@ impl Arm64BootSetup {
        Arm64BootSetup {}
    }

    fn populate_one_size(
        &mut self,
        setup: &mut BootSetup,
    async fn populate_one_size(
        &self,
        setup: &mut BootSetup<'_>,
        pfn_shift: u64,
        base_pfn: u64,
        pfn_count: u64,
@@ -78,20 +78,23 @@ impl Arm64BootSetup {
            extents[i as usize] = base_pfn + (i << pfn_shift);
        }

        let result_extents = setup.call.populate_physmap(
            setup.domid,
            count,
            pfn_shift as u32,
            0,
            &extents[0usize..count as usize],
        )?;
        let result_extents = setup
            .call
            .populate_physmap(
                setup.domid,
                count,
                pfn_shift as u32,
                0,
                &extents[0usize..count as usize],
            )
            .await?;
        slice_copy::copy(extents, &result_extents);
        Ok((result_extents.len() as u64) << pfn_shift)
    }

    fn populate_guest_memory(
    async fn populate_guest_memory(
        &mut self,
        setup: &mut BootSetup,
        setup: &mut BootSetup<'_>,
        base_pfn: u64,
        pfn_count: u64,
    ) -> Result<()> {
@@ -99,43 +102,51 @@ impl Arm64BootSetup {

        for pfn in 0..extents.len() {
            let mut allocsz = (1024 * 1024).min(pfn_count - pfn as u64);
            allocsz = self.populate_one_size(
                setup,
                PFN_512G_SHIFT,
                base_pfn + pfn as u64,
                allocsz,
                &mut extents,
            )?;
            allocsz = self
                .populate_one_size(
                    setup,
                    PFN_512G_SHIFT,
                    base_pfn + pfn as u64,
                    allocsz,
                    &mut extents,
                )
                .await?;
            if allocsz > 0 {
                continue;
            }
            allocsz = self.populate_one_size(
                setup,
                PFN_1G_SHIFT,
                base_pfn + pfn as u64,
                allocsz,
                &mut extents,
            )?;
            allocsz = self
                .populate_one_size(
                    setup,
                    PFN_1G_SHIFT,
                    base_pfn + pfn as u64,
                    allocsz,
                    &mut extents,
                )
                .await?;
            if allocsz > 0 {
                continue;
            }
            allocsz = self.populate_one_size(
                setup,
                PFN_2M_SHIFT,
                base_pfn + pfn as u64,
                allocsz,
                &mut extents,
            )?;
            allocsz = self
                .populate_one_size(
                    setup,
                    PFN_2M_SHIFT,
                    base_pfn + pfn as u64,
                    allocsz,
                    &mut extents,
                )
                .await?;
            if allocsz > 0 {
                continue;
            }
            allocsz = self.populate_one_size(
                setup,
                PFN_4K_SHIFT,
                base_pfn + pfn as u64,
                allocsz,
                &mut extents,
            )?;
            allocsz = self
                .populate_one_size(
                    setup,
                    PFN_4K_SHIFT,
                    base_pfn + pfn as u64,
                    allocsz,
                    &mut extents,
                )
                .await?;
            if allocsz == 0 {
                return Err(Error::MemorySetupFailed("allocsz is zero"));
            }
@@ -145,6 +156,7 @@ impl Arm64BootSetup {
    }
}

#[async_trait::async_trait]
impl ArchBootSetup for Arm64BootSetup {
    fn page_size(&mut self) -> u64 {
        ARM_PAGE_SIZE
@@ -158,15 +170,15 @@ impl ArchBootSetup for Arm64BootSetup {
        true
    }

    fn setup_shared_info(&mut self, _: &mut BootSetup, _: u64) -> Result<()> {
    async fn setup_shared_info(&mut self, _: &mut BootSetup, _: u64) -> Result<()> {
        Ok(())
    }

    fn setup_start_info(&mut self, _: &mut BootSetup, _: &BootState, _: &str) -> Result<()> {
    async fn setup_start_info(&mut self, _: &mut BootSetup, _: &BootState, _: &str) -> Result<()> {
        Ok(())
    }

    fn meminit(
    async fn meminit(
        &mut self,
        setup: &mut BootSetup,
        total_pages: u64,
@@ -176,7 +188,7 @@ impl ArchBootSetup for Arm64BootSetup {
        let kernel_segment = kernel_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
        setup.call.claim_pages(setup.domid, total_pages)?;
        setup.call.claim_pages(setup.domid, total_pages).await?;
        let mut ramsize = total_pages << XEN_PAGE_SHIFT;

        let bankbase = GUEST_RAM_BANK_BASES;
@@ -214,7 +226,8 @@ impl ArchBootSetup for Arm64BootSetup {
        }

        for i in 0..2 {
            self.populate_guest_memory(setup, bankbase[i] >> XEN_PAGE_SHIFT, rambank_size[i])?;
            self.populate_guest_memory(setup, bankbase[i] >> XEN_PAGE_SHIFT, rambank_size[i])
                .await?;
        }

        let bank0end = bankbase[0] + (rambank_size[0] << XEN_PAGE_SHIFT);
@@ -227,15 +240,15 @@ impl ArchBootSetup for Arm64BootSetup {
        } else {
            return Err(Error::MemorySetupFailed("unable to determine modbase"));
        };
        setup.call.claim_pages(setup.domid, 0)?;
        setup.call.claim_pages(setup.domid, 0).await?;
        Ok(())
    }

    fn bootlate(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
    async fn bootlate(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
        Ok(())
    }

    fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
    async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
        let mut vcpu = VcpuGuestContext::default();
        vcpu.user_regs.pc = state.image_info.virt_entry;
        vcpu.user_regs.x0 = 0xffffffff;
@@ -249,11 +262,11 @@ impl ArchBootSetup for Arm64BootSetup {
        vcpu.user_regs.cpsr = PSR_GUEST64_INIT;
        vcpu.flags = 1 << 0; // VGCF_ONLINE
        trace!("vcpu context: {:?}", vcpu);
        setup.call.set_vcpu_context(setup.domid, 0, &vcpu)?;
        setup.call.set_vcpu_context(setup.domid, 0, &vcpu).await?;
        Ok(())
    }

    fn alloc_p2m_segment(
    async fn alloc_p2m_segment(
        &mut self,
        _: &mut BootSetup,
        _: &BootImageInfo,
@@ -261,7 +274,7 @@ impl ArchBootSetup for Arm64BootSetup {
        Ok(None)
    }

    fn alloc_page_tables(
    async fn alloc_page_tables(
        &mut self,
        _: &mut BootSetup,
        _: &BootImageInfo,
@@ -269,7 +282,7 @@ impl ArchBootSetup for Arm64BootSetup {
        Ok(None)
    }

    fn setup_page_tables(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
    async fn setup_page_tables(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
        Ok(())
    }
}
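
The populate_guest_memory loop above keeps its original fallback strategy while becoming async: it tries to populate physmap extents of the largest order first (512 GiB, then 1 GiB, then 2 MiB, then 4 KiB) and only fails when even 4 KiB pages cannot be allocated. A condensed sketch of that control flow (hypothetical helper, not krata's API; the shift values follow from the 4 KiB base page size, e.g. 2 MiB = 2^9 pages):

const PFN_4K_SHIFT: u64 = 0;
const PFN_2M_SHIFT: u64 = 9;
const PFN_1G_SHIFT: u64 = 18;
const PFN_512G_SHIFT: u64 = 27;

// Try the largest extent order first; populate_one_size reports how much it managed
// to allocate, and 0 means "fall back to the next smaller order".
async fn populate_with_fallback(base_pfn: u64, pfn_count: u64) -> Result<(), &'static str> {
    for shift in [PFN_512G_SHIFT, PFN_1G_SHIFT, PFN_2M_SHIFT, PFN_4K_SHIFT] {
        let allocated = populate_one_size(base_pfn, pfn_count, shift).await?;
        if allocated > 0 {
            return Ok(());
        }
    }
    Err("allocsz is zero")
}

// Stub standing in for the populate_physmap hypercall issued above.
async fn populate_one_size(_base_pfn: u64, _pfn_count: u64, _shift: u64) -> Result<u64, &'static str> {
    Ok(0)
}
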
@@ -4,6 +4,7 @@ use crate::sys::{GrantEntry, XEN_PAGE_SHIFT};
use crate::Error;
use libc::munmap;
use log::debug;
use nix::errno::Errno;
use slice_copy::copy;

use crate::mem::ARCH_PAGE_SHIFT;
@@ -83,33 +84,34 @@ impl BootSetup<'_> {
        }
    }

    fn initialize_memory(
    async fn initialize_memory(
        &mut self,
        arch: &mut dyn ArchBootSetup,
        arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
        total_pages: u64,
        kernel_segment: &Option<DomainSegment>,
        initrd_segment: &Option<DomainSegment>,
    ) -> Result<()> {
        self.call.set_address_size(self.domid, 64)?;
        arch.meminit(self, total_pages, kernel_segment, initrd_segment)?;
        self.call.set_address_size(self.domid, 64).await?;
        arch.meminit(self, total_pages, kernel_segment, initrd_segment)
            .await?;
        Ok(())
    }

    fn setup_hypercall_page(&mut self, image_info: &BootImageInfo) -> Result<()> {
    async fn setup_hypercall_page(&mut self, image_info: &BootImageInfo) -> Result<()> {
        if image_info.virt_hypercall == XEN_UNSET_ADDR {
            return Ok(());
        }

        let pfn = (image_info.virt_hypercall - image_info.virt_base) >> ARCH_PAGE_SHIFT;
        let mfn = self.phys.p2m[pfn as usize];
        self.call.hypercall_init(self.domid, mfn)?;
        self.call.hypercall_init(self.domid, mfn).await?;
        Ok(())
    }

    pub fn initialize(
    pub async fn initialize<I: BootImageLoader + Send + Sync>(
        &mut self,
        arch: &mut dyn ArchBootSetup,
        image_loader: &dyn BootImageLoader,
        arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
        image_loader: &I,
        initrd: &[u8],
        max_vcpus: u32,
        mem_mb: u64,
@@ -117,60 +119,68 @@ impl BootSetup<'_> {
    ) -> Result<BootState> {
        debug!("initialize max_vcpus={:?} mem_mb={:?}", max_vcpus, mem_mb);

        let page_size = arch.page_size();
        let image_info = image_loader.parse()?;
        debug!("initialize image_info={:?}", image_info);
        let mut kernel_segment: Option<DomainSegment> = None;
        let mut initrd_segment: Option<DomainSegment> = None;
        if !image_info.unmapped_initrd {
            initrd_segment = Some(self.alloc_module(arch, initrd)?);
            initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
        }

        if arch.needs_early_kernel() {
            kernel_segment = Some(self.load_kernel_segment(arch, image_loader, &image_info)?);
            kernel_segment = Some(
                self.load_kernel_segment(page_size, image_loader, &image_info)
                    .await?,
            );
        }

        let total_pages = mem_mb << (20 - arch.page_shift());
        self.initialize_memory(arch, total_pages, &kernel_segment, &initrd_segment)?;
        self.initialize_memory(arch, total_pages, &kernel_segment, &initrd_segment)
            .await?;
        self.virt_alloc_end = image_info.virt_base;

        if kernel_segment.is_none() {
            kernel_segment = Some(self.load_kernel_segment(arch, image_loader, &image_info)?);
            kernel_segment = Some(
                self.load_kernel_segment(page_size, image_loader, &image_info)
                    .await?,
            );
        }

        let mut p2m_segment: Option<DomainSegment> = None;
        if image_info.virt_p2m_base >= image_info.virt_base
            || (image_info.virt_p2m_base & ((1 << arch.page_shift()) - 1)) != 0
        {
            p2m_segment = arch.alloc_p2m_segment(self, &image_info)?;
            p2m_segment = arch.alloc_p2m_segment(self, &image_info).await?;
        }
        let start_info_segment = self.alloc_page(arch)?;
        let xenstore_segment = self.alloc_page(arch)?;
        let start_info_segment = self.alloc_page(page_size)?;
        let xenstore_segment = self.alloc_page(page_size)?;
        let mut consoles: Vec<(u32, DomainSegment)> = Vec::new();
        for _ in 0..console_count {
            let evtchn = self.call.evtchn_alloc_unbound(self.domid, 0)?;
            let page = self.alloc_page(arch)?;
            let evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;
            let page = self.alloc_page(page_size)?;
            consoles.push((evtchn, page));
        }
        let page_table_segment = arch.alloc_page_tables(self, &image_info)?;
        let boot_stack_segment = self.alloc_page(arch)?;
        let page_table_segment = arch.alloc_page_tables(self, &image_info).await?;
        let boot_stack_segment = self.alloc_page(page_size)?;

        if self.virt_pgtab_end > 0 {
            self.alloc_padding_pages(arch, self.virt_pgtab_end)?;
            self.alloc_padding_pages(page_size, self.virt_pgtab_end)?;
        }

        if p2m_segment.is_none() {
            if let Some(mut segment) = arch.alloc_p2m_segment(self, &image_info)? {
            if let Some(mut segment) = arch.alloc_p2m_segment(self, &image_info).await? {
                segment.vstart = image_info.virt_p2m_base;
                p2m_segment = Some(segment);
            }
        }

        if image_info.unmapped_initrd {
            initrd_segment = Some(self.alloc_module(arch, initrd)?);
            initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
        }

        let initrd_segment = initrd_segment.unwrap();
        let store_evtchn = self.call.evtchn_alloc_unbound(self.domid, 0)?;
        let store_evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;

        let kernel_segment =
            kernel_segment.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
@@ -192,35 +202,37 @@ impl BootSetup<'_> {
        Ok(state)
    }

    pub fn boot(
    pub async fn boot(
        &mut self,
        arch: &mut dyn ArchBootSetup,
        arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
        state: &mut BootState,
        cmdline: &str,
    ) -> Result<()> {
        let domain_info = self.call.get_domain_info(self.domid)?;
        let domain_info = self.call.get_domain_info(self.domid).await?;
        let shared_info_frame = domain_info.shared_info_frame;
        state.shared_info_frame = shared_info_frame;
        arch.setup_page_tables(self, state)?;
        arch.setup_start_info(self, state, cmdline)?;
        self.setup_hypercall_page(&state.image_info)?;
        arch.bootlate(self, state)?;
        arch.setup_shared_info(self, state.shared_info_frame)?;
        arch.vcpu(self, state)?;
        arch.setup_page_tables(self, state).await?;
        arch.setup_start_info(self, state, cmdline).await?;
        self.setup_hypercall_page(&state.image_info).await?;
        arch.bootlate(self, state).await?;
        arch.setup_shared_info(self, state.shared_info_frame)
            .await?;
        arch.vcpu(self, state).await?;
        self.phys.unmap_all()?;
        self.gnttab_seed(state)?;
        self.gnttab_seed(state).await?;
        Ok(())
    }

    fn gnttab_seed(&mut self, state: &mut BootState) -> Result<()> {
    async fn gnttab_seed(&mut self, state: &mut BootState) -> Result<()> {
        let console_gfn =
            self.phys.p2m[state.consoles.first().map(|x| x.1.pfn).unwrap_or(0) as usize];
        let xenstore_gfn = self.phys.p2m[state.xenstore_segment.pfn as usize];
        let addr = self
            .call
            .mmap(0, 1 << XEN_PAGE_SHIFT)
            .await
            .ok_or(Error::MmapFailed)?;
        self.call.map_resource(self.domid, 1, 0, 0, 1, addr)?;
        self.call.map_resource(self.domid, 1, 0, 0, 1, addr).await?;
        let entries = unsafe { slice::from_raw_parts_mut(addr as *mut GrantEntry, 2) };
        entries[0].flags = 1 << 0;
        entries[0].domid = 0;
@@ -231,23 +243,25 @@ impl BootSetup<'_> {
        unsafe {
            let result = munmap(addr as *mut c_void, 1 << XEN_PAGE_SHIFT);
            if result != 0 {
                return Err(Error::UnmapFailed);
                return Err(Error::UnmapFailed(Errno::from_raw(result)));
            }
        }
        Ok(())
    }

    fn load_kernel_segment(
    async fn load_kernel_segment<I: BootImageLoader + Send + Sync>(
        &mut self,
        arch: &mut dyn ArchBootSetup,
        image_loader: &dyn BootImageLoader,
        page_size: u64,
        image_loader: &I,
        image_info: &BootImageInfo,
    ) -> Result<DomainSegment> {
        let kernel_segment = self.alloc_segment(
            arch,
            image_info.virt_kstart,
            image_info.virt_kend - image_info.virt_kstart,
        )?;
        let kernel_segment = self
            .alloc_segment(
                page_size,
                image_info.virt_kstart,
                image_info.virt_kend - image_info.virt_kstart,
            )
            .await?;
        let kernel_segment_ptr = kernel_segment.addr as *mut u8;
        let kernel_segment_slice =
            unsafe { slice::from_raw_parts_mut(kernel_segment_ptr, kernel_segment.size as usize) };
@@ -264,18 +278,19 @@ impl BootSetup<'_> {
        (1 << bits) - 1
    }

    pub(crate) fn alloc_segment(
    pub(crate) async fn alloc_segment(
        &mut self,
        arch: &mut dyn ArchBootSetup,
        page_size: u64,
        start: u64,
        size: u64,
    ) -> Result<DomainSegment> {
        debug!("alloc_segment {:#x} {:#x}", start, size);
        if start > 0 {
            self.alloc_padding_pages(arch, start)?;
            self.alloc_padding_pages(page_size, start)?;
        }

        let page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
        let pages = (size + page_size as u64 - 1) / page_size as u64;
        let local_page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
        let pages = (size + local_page_size as u64 - 1) / local_page_size as u64;
        let start = self.virt_alloc_end;

        let mut segment = DomainSegment {
@@ -288,12 +303,12 @@ impl BootSetup<'_> {
            pages,
        };

        self.chk_alloc_pages(arch, pages)?;
        self.chk_alloc_pages(page_size, pages)?;

        let ptr = self.phys.pfn_to_ptr(segment.pfn, pages)?;
        let ptr = self.phys.pfn_to_ptr(segment.pfn, pages).await?;
        segment.addr = ptr;
        let slice = unsafe {
            slice::from_raw_parts_mut(ptr as *mut u8, (pages * page_size as u64) as usize)
            slice::from_raw_parts_mut(ptr as *mut u8, (pages * local_page_size as u64) as usize)
        };
        slice.fill(0);
        segment.vend = self.virt_alloc_end;
@@ -304,15 +319,15 @@ impl BootSetup<'_> {
        Ok(segment)
    }

    fn alloc_page(&mut self, arch: &mut dyn ArchBootSetup) -> Result<DomainSegment> {
    fn alloc_page(&mut self, page_size: u64) -> Result<DomainSegment> {
        let start = self.virt_alloc_end;
        let pfn = self.pfn_alloc_end;

        self.chk_alloc_pages(arch, 1)?;
        self.chk_alloc_pages(page_size, 1)?;
        debug!("alloc_page {:#x} (pfn {:#x})", start, pfn);
        Ok(DomainSegment {
            vstart: start,
            vend: (start + arch.page_size()) - 1,
            vend: (start + page_size) - 1,
            pfn,
            addr: 0,
            size: 0,
@@ -321,31 +336,29 @@ impl BootSetup<'_> {
        })
    }

    fn alloc_module(
        &mut self,
        arch: &mut dyn ArchBootSetup,
        buffer: &[u8],
    ) -> Result<DomainSegment> {
        let segment = self.alloc_segment(arch, 0, buffer.len() as u64)?;
    async fn alloc_module(&mut self, page_size: u64, buffer: &[u8]) -> Result<DomainSegment> {
        let segment = self
            .alloc_segment(page_size, 0, buffer.len() as u64)
            .await?;
        let slice = unsafe { slice::from_raw_parts_mut(segment.addr as *mut u8, buffer.len()) };
        copy(slice, buffer);
        Ok(segment)
    }

    fn alloc_padding_pages(&mut self, arch: &mut dyn ArchBootSetup, boundary: u64) -> Result<()> {
        if (boundary & (arch.page_size() - 1)) != 0 {
    fn alloc_padding_pages(&mut self, page_size: u64, boundary: u64) -> Result<()> {
        if (boundary & (page_size - 1)) != 0 {
            return Err(Error::MemorySetupFailed("boundary is incorrect"));
        }

        if boundary < self.virt_alloc_end {
            return Err(Error::MemorySetupFailed("boundary is below allocation end"));
        }
        let pages = (boundary - self.virt_alloc_end) / arch.page_size();
        self.chk_alloc_pages(arch, pages)?;
        let pages = (boundary - self.virt_alloc_end) / page_size;
        self.chk_alloc_pages(page_size, pages)?;
        Ok(())
    }

    fn chk_alloc_pages(&mut self, arch: &mut dyn ArchBootSetup, pages: u64) -> Result<()> {
    fn chk_alloc_pages(&mut self, page_size: u64, pages: u64) -> Result<()> {
        if pages > self.total_pages
            || self.pfn_alloc_end > self.total_pages
            || pages > self.total_pages - self.pfn_alloc_end
@@ -354,47 +367,56 @@ impl BootSetup<'_> {
        }

        self.pfn_alloc_end += pages;
        self.virt_alloc_end += pages * arch.page_size();
        self.virt_alloc_end += pages * page_size;
        Ok(())
    }
}

#[async_trait::async_trait]
pub trait ArchBootSetup {
    fn page_size(&mut self) -> u64;
    fn page_shift(&mut self) -> u64;

    fn needs_early_kernel(&mut self) -> bool;

    fn alloc_p2m_segment(
    async fn alloc_p2m_segment(
        &mut self,
        setup: &mut BootSetup,
        image_info: &BootImageInfo,
    ) -> Result<Option<DomainSegment>>;

    fn alloc_page_tables(
    async fn alloc_page_tables(
        &mut self,
        setup: &mut BootSetup,
        image_info: &BootImageInfo,
    ) -> Result<Option<DomainSegment>>;

    fn setup_page_tables(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
    async fn setup_page_tables(
        &mut self,
        setup: &mut BootSetup,
        state: &mut BootState,
    ) -> Result<()>;

    fn setup_start_info(
    async fn setup_start_info(
        &mut self,
        setup: &mut BootSetup,
        state: &BootState,
        cmdline: &str,
    ) -> Result<()>;

    fn setup_shared_info(&mut self, setup: &mut BootSetup, shared_info_frame: u64) -> Result<()>;
    async fn setup_shared_info(
        &mut self,
        setup: &mut BootSetup,
        shared_info_frame: u64,
    ) -> Result<()>;

    fn meminit(
    async fn meminit(
        &mut self,
        setup: &mut BootSetup,
        total_pages: u64,
        kernel_segment: &Option<DomainSegment>,
        initrd_segment: &Option<DomainSegment>,
    ) -> Result<()>;
    fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
    fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
    async fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
    async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
}
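
One more pattern in these boot.rs hunks: helpers such as alloc_page, alloc_padding_pages and chk_alloc_pages no longer take arch: &mut dyn ArchBootSetup just to ask for the page size; initialize() reads arch.page_size() once and threads a plain page_size: u64 through them, so those helpers stay synchronous and never borrow the boxed trait object. A small illustrative sketch (hypothetical types, not the real BootSetup):

struct Setup {
    virt_alloc_end: u64,
    pfn_alloc_end: u64,
}

impl Setup {
    // Stays a plain fn: it only needs the page size, not the arch trait object.
    fn chk_alloc_pages(&mut self, page_size: u64, pages: u64) {
        self.pfn_alloc_end += pages;
        self.virt_alloc_end += pages * page_size;
    }
}

async fn initialize(setup: &mut Setup, page_size: u64) {
    // page_size is Copy, so it can be reused freely before and after awaits.
    setup.chk_alloc_pages(page_size, 1);
    async_step().await;
    setup.chk_alloc_pages(page_size, 4);
}

async fn async_step() {}
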
@@ -22,8 +22,8 @@ pub enum Error {
    ElfParseFailed(#[from] elf::ParseError),
    #[error("mmap failed")]
    MmapFailed,
    #[error("munmap failed")]
    UnmapFailed,
    #[error("munmap failed: {0}")]
    UnmapFailed(nix::errno::Errno),
    #[error("memory setup failed: {0}")]
    MemorySetupFailed(&'static str),
    #[error("populate physmap failed: wanted={0}, received={1}, input_extents={2}")]
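
The UnmapFailed variant now carries the errno from nix, so a failed munmap reports a reason instead of a bare "munmap failed". A minimal sketch of the same thiserror shape (hypothetical trimmed-down enum; the hunks above build the value with Errno::from_raw, while the sketch reads Errno::last() after the failing call):

use nix::errno::Errno;
use std::ffi::c_void;
use thiserror::Error;

// Trimmed-down error enum carrying the errno for the unmap path.
#[derive(Error, Debug)]
enum Error {
    #[error("mmap failed")]
    MmapFailed,
    #[error("munmap failed: {0}")]
    UnmapFailed(Errno),
}

fn unmap(addr: *mut c_void, len: usize) -> Result<(), Error> {
    // munmap returns 0 on success and -1 on failure, with the reason left in errno.
    if unsafe { libc::munmap(addr, len) } != 0 {
        return Err(Error::UnmapFailed(Errno::last()));
    }
    Ok(())
}
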
@@ -16,7 +16,7 @@ pub mod arm64;
#[cfg(target_arch = "aarch64")]
use crate::arm64::Arm64BootSetup;

use crate::boot::BootSetup;
use crate::boot::{ArchBootSetup, BootSetup};
use crate::elfloader::ElfImageLoader;
use crate::error::{Error, Result};
use boot::BootState;
@@ -34,6 +34,7 @@ use xenstore::{
    XsPermission, XsdClient, XsdInterface, XS_PERM_NONE, XS_PERM_READ, XS_PERM_READ_WRITE,
};

#[derive(Clone)]
pub struct XenClient {
    pub store: XsdClient,
    call: XenCall,
@@ -115,7 +116,7 @@ impl XenClient {
        Ok(XenClient { store, call })
    }

    pub async fn create(&mut self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
    pub async fn create(&self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
        let mut domain = CreateDomain {
            max_vcpus: config.max_vcpus,
            ..Default::default()
@@ -125,7 +126,7 @@ impl XenClient {
            domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP;
        }

        let domid = self.call.create_domain(domain)?;
        let domid = self.call.create_domain(domain).await?;
        match self.init(domid, &domain, config).await {
            Ok(created) => Ok(created),
            Err(err) => {
@@ -138,7 +139,7 @@ impl XenClient {
    }

    async fn init(
        &mut self,
        &self,
        domid: u32,
        domain: &CreateDomain,
        config: &DomainConfig<'_>,
@@ -253,8 +254,8 @@ impl XenClient {
            tx.commit().await?;
        }

        self.call.set_max_vcpus(domid, config.max_vcpus)?;
        self.call.set_max_mem(domid, config.mem_mb * 1024)?;
        self.call.set_max_vcpus(domid, config.max_vcpus).await?;
        self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
        let image_loader = ElfImageLoader::load_file_kernel(config.kernel_path)?;

        let xenstore_evtchn: u32;
@@ -265,19 +266,21 @@ impl XenClient {
        {
            let mut boot = BootSetup::new(&self.call, domid);
            #[cfg(target_arch = "x86_64")]
            let mut arch = X86BootSetup::new();
            let mut arch = Box::new(X86BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
            #[cfg(target_arch = "aarch64")]
            let mut arch = Arm64BootSetup::new();
            let mut arch = Box::new(Arm64BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
            let initrd = read(config.initrd_path)?;
            state = boot.initialize(
                &mut arch,
                &image_loader,
                initrd.as_slice(),
                config.max_vcpus,
                config.mem_mb,
                1,
            )?;
            boot.boot(&mut arch, &mut state, config.cmdline)?;
            state = boot
                .initialize(
                    &mut arch,
                    &image_loader,
                    initrd.as_slice(),
                    config.max_vcpus,
                    config.mem_mb,
                    1,
                )
                .await?;
            boot.boot(&mut arch, &mut state, config.cmdline).await?;
            xenstore_evtchn = state.store_evtchn;
            xenstore_mfn = boot.phys.p2m[state.xenstore_segment.pfn as usize];
            p2m = boot.phys.p2m;
@@ -418,7 +421,8 @@ impl XenClient {
        for channel in &config.event_channels {
            let id = self
                .call
                .evtchn_alloc_unbound(domid, config.backend_domid)?;
                .evtchn_alloc_unbound(domid, config.backend_domid)
                .await?;
            let channel_path = format!("{}/evtchn/{}", dom_path, channel.name);
            self.store
                .write_string(&format!("{}/name", channel_path), channel.name)
@@ -428,12 +432,12 @@ impl XenClient {
                .await?;
        }

        self.call.unpause_domain(domid)?;
        self.call.unpause_domain(domid).await?;
        Ok(CreatedDomain { domid, channels })
    }

    async fn disk_device_add(
        &mut self,
        &self,
        dom_path: &str,
        backend_dom_path: &str,
        backend_domid: u32,
@@ -486,7 +490,7 @@ impl XenClient {

    #[allow(clippy::too_many_arguments, clippy::unnecessary_unwrap)]
    async fn console_device_add(
        &mut self,
        &self,
        channel: &DomainChannel,
        p2m: &[u64],
        state: &BootState,
@@ -553,7 +557,7 @@ impl XenClient {
    }

    async fn fs_9p_device_add(
        &mut self,
        &self,
        dom_path: &str,
        backend_dom_path: &str,
        backend_domid: u32,
@@ -591,7 +595,7 @@ impl XenClient {
    }

    async fn vif_device_add(
        &mut self,
        &self,
        dom_path: &str,
        backend_dom_path: &str,
        backend_domid: u32,
@@ -650,7 +654,7 @@ impl XenClient {

    #[allow(clippy::too_many_arguments)]
    async fn device_add(
        &mut self,
        &self,
        typ: &str,
        id: u64,
        dom_path: &str,
@@ -713,15 +717,15 @@ impl XenClient {
        Ok(())
    }

    pub async fn destroy(&mut self, domid: u32) -> Result<()> {
    pub async fn destroy(&self, domid: u32) -> Result<()> {
        if let Err(err) = self.destroy_store(domid).await {
            warn!("failed to destroy store for domain {}: {}", domid, err);
        }
        self.call.destroy_domain(domid)?;
        self.call.destroy_domain(domid).await?;
        Ok(())
    }

    async fn destroy_store(&mut self, domid: u32) -> Result<()> {
    async fn destroy_store(&self, domid: u32) -> Result<()> {
        let dom_path = self.store.get_domain_path(domid).await?;
        let vm_path = self.store.read_string(&format!("{}/vm", dom_path)).await?;
        if vm_path.is_none() {
@@ -813,7 +817,7 @@ impl XenClient {
        Ok(())
    }

    pub async fn get_console_path(&mut self, domid: u32) -> Result<String> {
    pub async fn get_console_path(&self, domid: u32) -> Result<String> {
        let dom_path = self.store.get_domain_path(domid).await?;
        let console_tty_path = format!("{}/console/tty", dom_path);
        let mut tty: Option<String> = None;
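
The lib.rs changes are what the commit title points at: XenClient derives Clone and its methods take &self instead of &mut self, so several tasks can each hold their own handle and reconcile domains in parallel. A hedged sketch of the intended usage (method names taken from the hunks above; assumes the client and its futures are Send so they can cross tokio::spawn):

use xenclient::XenClient;

// Hypothetical driver: hand a clone of the client to each concurrently running task.
async fn destroy_all(client: &XenClient, domids: Vec<u32>) {
    let mut tasks = Vec::new();
    for domid in domids {
        let client = client.clone();
        tasks.push(tokio::spawn(async move {
            // destroy(&self, domid) as shown in the diff above; errors are only logged here.
            if let Err(err) = client.destroy(domid).await {
                eprintln!("failed to destroy domain {}: {}", domid, err);
            }
        }));
    }
    for task in tasks {
        let _ = task.await;
    }
}
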
@@ -3,6 +3,7 @@ use crate::sys::{XEN_PAGE_SHIFT, XEN_PAGE_SIZE};
use crate::Error;
use libc::munmap;
use log::debug;
use nix::errno::Errno;
use std::ffi::c_void;

#[cfg(target_arch = "aarch64")]
@@ -45,7 +46,7 @@ impl PhysicalPages<'_> {
        self.p2m.len() as u64
    }

    pub fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
    pub async fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
        for page in &self.pages {
            if pfn >= page.pfn + page.count {
                continue;
@@ -76,10 +77,10 @@ impl PhysicalPages<'_> {
            return Err(Error::MemorySetupFailed("page count is zero"));
        }

        self.pfn_alloc(pfn, count)
        self.pfn_alloc(pfn, count).await
    }

    fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
    async fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
        let mut entries = vec![MmapEntry::default(); count as usize];
        for (i, entry) in entries.iter_mut().enumerate() {
            entry.mfn = self.p2m[pfn as usize + i];
@@ -98,9 +99,13 @@ impl PhysicalPages<'_> {
        let addr = self
            .call
            .mmap(0, actual_mmap_len)
            .await
            .ok_or(Error::MmapFailed)?;
        debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
        let result = self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
        let result = self
            .call
            .mmap_batch(self.domid, num as u64, addr, pfns)
            .await?;
        if result != 0 {
            return Err(Error::MmapFailed);
        }
@@ -117,7 +122,7 @@ impl PhysicalPages<'_> {
        Ok(addr)
    }

    pub fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
    pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
        let num = ((size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT) as usize;
        let mut pfns = vec![u64::MAX; num];
        for (i, item) in pfns.iter_mut().enumerate().take(num) {
@@ -128,9 +133,13 @@ impl PhysicalPages<'_> {
        let addr = self
            .call
            .mmap(0, actual_mmap_len)
            .await
            .ok_or(Error::MmapFailed)?;
        debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
        let result = self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
        let result = self
            .call
            .mmap_batch(self.domid, num as u64, addr, pfns)
            .await?;
        if result != 0 {
            return Err(Error::MmapFailed);
        }
@@ -155,7 +164,7 @@ impl PhysicalPages<'_> {
                    (page.count << ARCH_PAGE_SHIFT) as usize,
                );
                if err != 0 {
                    return Err(Error::UnmapFailed);
                    return Err(Error::UnmapFailed(Errno::from_raw(err)));
                }
            }
        }
@@ -181,7 +190,7 @@ impl PhysicalPages<'_> {
                page.ptr
            );
            if err != 0 {
                return Err(Error::UnmapFailed);
                return Err(Error::UnmapFailed(Errno::from_raw(err)));
            }
            self.pages.remove(i);
        }

@@ -275,6 +275,7 @@ impl X86BootSetup {
    }
}

#[async_trait::async_trait]
impl ArchBootSetup for X86BootSetup {
    fn page_size(&mut self) -> u64 {
        X86_PAGE_SIZE
@@ -288,7 +289,7 @@ impl ArchBootSetup for X86BootSetup {
        false
    }

    fn alloc_p2m_segment(
    async fn alloc_p2m_segment(
        &mut self,
        setup: &mut BootSetup,
        image_info: &BootImageInfo,
@@ -310,11 +311,13 @@ impl ArchBootSetup for X86BootSetup {
        }
        self.table.mappings_count += 1;
        p2m_alloc_size += (pgtables << X86_PAGE_SHIFT) as u64;
        let p2m_segment = setup.alloc_segment(self, 0, p2m_alloc_size)?;
        let p2m_segment = setup
            .alloc_segment(self.page_size(), 0, p2m_alloc_size)
            .await?;
        Ok(Some(p2m_segment))
    }

    fn alloc_page_tables(
    async fn alloc_page_tables(
        &mut self,
        setup: &mut BootSetup,
        image_info: &BootImageInfo,
@@ -341,7 +344,7 @@ impl ArchBootSetup for X86BootSetup {
        self.table.mappings_count += 1;
        setup.virt_pgtab_end = try_virt_end + 1;
        let size = self.table.mappings[m].area.pgtables as u64 * X86_PAGE_SIZE;
        let segment = setup.alloc_segment(self, 0, size)?;
        let segment = setup.alloc_segment(self.page_size(), 0, size).await?;
        debug!(
            "alloc_page_tables table={:?} segment={:?}",
            self.table, segment
@@ -349,7 +352,11 @@ impl ArchBootSetup for X86BootSetup {
        Ok(Some(segment))
    }

    fn setup_page_tables(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
    async fn setup_page_tables(
        &mut self,
        setup: &mut BootSetup,
        state: &mut BootState,
    ) -> Result<()> {
        let p2m_segment = state
            .p2m_segment
            .as_ref()
@@ -364,7 +371,7 @@ impl ArchBootSetup for X86BootSetup {
                let map1 = &self.table.mappings[m1];
                let from = map1.levels[l].from;
                let to = map1.levels[l].to;
                let pg_ptr = setup.phys.pfn_to_ptr(map1.levels[l].pfn, 0)? as *mut u64;
                let pg_ptr = setup.phys.pfn_to_ptr(map1.levels[l].pfn, 0).await? as *mut u64;
                for m2 in 0usize..self.table.mappings_count {
                    let map2 = &self.table.mappings[m2];
                    let lvl = if l > 0 {
@@ -407,13 +414,16 @@ impl ArchBootSetup for X86BootSetup {
        Ok(())
    }

    fn setup_start_info(
    async fn setup_start_info(
        &mut self,
        setup: &mut BootSetup,
        state: &BootState,
        cmdline: &str,
    ) -> Result<()> {
        let ptr = setup.phys.pfn_to_ptr(state.start_info_segment.pfn, 1)?;
        let ptr = setup
            .phys
            .pfn_to_ptr(state.start_info_segment.pfn, 1)
            .await?;
        let byte_slice =
            unsafe { slice::from_raw_parts_mut(ptr as *mut u8, X86_PAGE_SIZE as usize) };
        byte_slice.fill(0);
@@ -456,11 +466,15 @@ impl ArchBootSetup for X86BootSetup {
        Ok(())
    }

    fn setup_shared_info(&mut self, setup: &mut BootSetup, shared_info_frame: u64) -> Result<()> {
    async fn setup_shared_info(
        &mut self,
        setup: &mut BootSetup,
        shared_info_frame: u64,
    ) -> Result<()> {
        let info = setup
            .phys
            .map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)?
            as *mut SharedInfo;
            .map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)
            .await? as *mut SharedInfo;
        unsafe {
            let size = size_of::<SharedInfo>();
            let info_as_buff = slice::from_raw_parts_mut(info as *mut u8, size);
@@ -473,14 +487,14 @@ impl ArchBootSetup for X86BootSetup {
        Ok(())
    }

    fn meminit(
    async fn meminit(
        &mut self,
        setup: &mut BootSetup,
        total_pages: u64,
        _: &Option<DomainSegment>,
        _: &Option<DomainSegment>,
    ) -> Result<()> {
        setup.call.claim_pages(setup.domid, total_pages)?;
        setup.call.claim_pages(setup.domid, total_pages).await?;
        let mut vmemranges: Vec<VmemRange> = Vec::new();
        let stub = VmemRange {
            start: 0,
@@ -530,13 +544,16 @@ impl ArchBootSetup for X86BootSetup {
            }

            let extents_init_slice = extents_init.as_slice();
            let extents = setup.call.populate_physmap(
                setup.domid,
                count,
                SUPERPAGE_2MB_SHIFT as u32,
                0,
                &extents_init_slice[0usize..count as usize],
            )?;
            let extents = setup
                .call
                .populate_physmap(
                    setup.domid,
                    count,
                    SUPERPAGE_2MB_SHIFT as u32,
                    0,
                    &extents_init_slice[0usize..count as usize],
                )
                .await?;

            pfn = pfn_base_idx;
            for mfn in extents {
@@ -558,10 +575,10 @@ impl ArchBootSetup for X86BootSetup {
                let p2m_idx = (pfn_base + j) as usize;
                let p2m_end_idx = p2m_idx + allocsz as usize;
                let input_extent_starts = &p2m[p2m_idx..p2m_end_idx];
                let result =
                    setup
                        .call
                        .populate_physmap(setup.domid, allocsz, 0, 0, input_extent_starts)?;
                let result = setup
                    .call
                    .populate_physmap(setup.domid, allocsz, 0, 0, input_extent_starts)
                    .await?;

                if result.len() != allocsz as usize {
                    return Err(Error::PopulatePhysmapFailed(
@@ -581,11 +598,11 @@ impl ArchBootSetup for X86BootSetup {
        }

        setup.phys.load_p2m(p2m);
        setup.call.claim_pages(setup.domid, 0)?;
        setup.call.claim_pages(setup.domid, 0).await?;
        Ok(())
    }

    fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
    async fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
        let p2m_segment = state
            .p2m_segment
            .as_ref()
@@ -600,11 +617,12 @@ impl ArchBootSetup for X86BootSetup {
        setup.phys.unmap(p2m_segment.pfn)?;
        setup
            .call
            .mmuext(setup.domid, MMUEXT_PIN_L4_TABLE, pg_mfn, 0)?;
            .mmuext(setup.domid, MMUEXT_PIN_L4_TABLE, pg_mfn, 0)
            .await?;
        Ok(())
    }

    fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
    async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
        let page_table_segment = state
            .page_table_segment
            .as_ref()
@@ -633,7 +651,7 @@ impl ArchBootSetup for X86BootSetup {
        vcpu.kernel_ss = vcpu.user_regs.ss as u64;
        vcpu.kernel_sp = vcpu.user_regs.rsp;
        trace!("vcpu context: {:?}", vcpu);
        setup.call.set_vcpu_context(setup.domid, 0, &vcpu)?;
        setup.call.set_vcpu_context(setup.domid, 0, &vcpu).await?;
        Ok(())
    }
}