debugging for fixing page table issues

Alex Zenla
2024-01-15 16:25:06 -08:00
parent 900eba0bd9
commit dfc3dc8e90
2 changed files with 21 additions and 11 deletions

View File

@@ -233,16 +233,16 @@ impl BootSetup<'_> {
         debug!("BootSetup initialize image_info={:?}", image_info);
         self.virt_alloc_end = image_info.virt_base;
         let kernel_segment = self.load_kernel_segment(image_loader, &image_info)?;
-        let mut page_table = PageTable::default();
-        let p2m_segment = self.alloc_p2m_segment(&mut page_table, &image_info)?;
         let start_info_segment = self.alloc_page()?;
         let xenstore_segment = self.alloc_page()?;
         let console_segment = self.alloc_page()?;
+        let mut page_table = PageTable::default();
         let page_table_segment = self.alloc_page_tables(&mut page_table, &image_info)?;
         let boot_stack_segment = self.alloc_page()?;
         if self.virt_pgtab_end > 0 {
             self.alloc_padding_pages(self.virt_pgtab_end)?;
         }
+        let p2m_segment = self.alloc_p2m_segment(&mut page_table, &image_info)?;
         let state = BootState {
             kernel_segment,
             start_info_segment,
@@ -338,8 +338,15 @@ impl BootSetup<'_> {
             let prot = self.get_pg_prot(lvl_idx, pfn, &state.page_table);
             let pfn_paddr = self.phys.p2m[pfn as usize] << X86_PAGE_SHIFT;
+            let value = pfn_paddr | prot;
+            if pfn == state.page_table_segment.pfn {
+                debug!(
+                    "pgtable pfn: {:#x}, p: {:#x}, pfn_paddr: {:#x}, value: {:#x}",
+                    pfn, p, pfn_paddr, value
+                );
+            }
             unsafe {
-                *pg.add(p as usize) = pfn_paddr | prot;
+                *pg.add(p as usize) = value;
             }
             pfn += 1;
         }
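
For context on the hunk above, a minimal standalone Rust sketch of how the written entry value is composed: the machine frame looked up in the p2m array is shifted into a physical address by X86_PAGE_SHIFT and combined with the protection bits. The pte_value helper, the sample p2m contents, and the assumption that X86_PAGE_SHIFT is 12 (4 KiB x86 pages) are illustrative only and not part of this commit.

// Illustrative sketch only; pte_value and the sample values are hypothetical.
const X86_PAGE_SHIFT: u64 = 12; // assumed 4 KiB x86 pages

fn pte_value(p2m: &[u64], pfn: u64, prot: u64) -> u64 {
    // Shift the machine frame number into a physical address, then OR in
    // the protection bits, mirroring pfn_paddr | prot in the hunk above.
    let pfn_paddr = p2m[pfn as usize] << X86_PAGE_SHIFT;
    pfn_paddr | prot
}

fn main() {
    // Hypothetical p2m table: guest pfn 0 maps to machine frame 0x1a2b.
    let p2m = [0x1a2b_u64];
    // e.g. present | writable; the real bits come from get_pg_prot().
    let prot = 0x3;
    assert_eq!(pte_value(&p2m, 0, prot), (0x1a2b << 12) | 0x3);
}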

View File

@@ -74,25 +74,28 @@ impl PhysicalPages<'_> {
     fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64, XenClientError> {
         let mut entries = vec![MmapEntry::default(); count as usize];
-        for (i, entry) in (0_u64..).zip(entries.iter_mut()) {
-            entry.mfn = self.p2m[(pfn + i) as usize];
+        for (i, entry) in entries.iter_mut().enumerate() {
+            entry.mfn = self.p2m[pfn as usize + i];
         }
         let chunk_size = 1 << XEN_PAGE_SHIFT;
         let num_per_entry = chunk_size >> XEN_PAGE_SHIFT;
-        let num = num_per_entry * entries.len();
-        let mut pfns = vec![0u64; num];
-        for i in 0..entries.len() {
+        let num = num_per_entry * count as usize;
+        let mut pfns = vec![u64::MAX; num];
+        for i in 0..count as usize {
             for j in 0..num_per_entry {
                 pfns[i * num_per_entry + j] = entries[i].mfn + j as u64;
             }
         }
-        let size = count << XEN_PAGE_SHIFT;
+        let actual_mmap_len = (num as u64) << XEN_PAGE_SHIFT;
         let addr = self
             .call
-            .mmap(0, size)
+            .mmap(0, actual_mmap_len)
             .ok_or(XenClientError::new("failed to mmap address"))?;
-        self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
+        let result = self.call.mmap_batch(self.domid, num as u64, addr, pfns)?;
+        if result != 0 {
+            return Err(XenClientError::new("mmap_batch call failed"));
+        }
         let page = PhysicalPage {
             pfn,
             ptr: addr,
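
To make the pfns construction in this hunk easier to follow, here is a small standalone sketch of the same expansion: each MmapEntry contributes num_per_entry consecutive frame numbers starting at its mfn, and the array is pre-filled with u64::MAX so unfilled slots stand out, mirroring the change above. The expand_pfns helper is hypothetical and only for illustration; note that with chunk_size equal to one Xen page, num_per_entry works out to 1. The hunk also starts treating a non-zero return from mmap_batch as an error instead of ignoring it.

// Hypothetical helper, for illustration only (not part of the crate):
// expand each base mfn into num_per_entry consecutive frame numbers,
// exactly as the nested loop in the hunk above does.
fn expand_pfns(mfns: &[u64], num_per_entry: usize) -> Vec<u64> {
    // Pre-fill with u64::MAX so an unfilled slot is obvious.
    let mut pfns = vec![u64::MAX; mfns.len() * num_per_entry];
    for (i, mfn) in mfns.iter().enumerate() {
        for j in 0..num_per_entry {
            pfns[i * num_per_entry + j] = *mfn + j as u64;
        }
    }
    pfns
}

fn main() {
    // With chunk_size == 1 << XEN_PAGE_SHIFT, num_per_entry is 1 and the
    // result is just the mfns themselves.
    assert_eq!(expand_pfns(&[0x10, 0x20], 1), vec![0x10, 0x20]);
}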