Files
krata/crates/xen/xenplatform/src/mem.rs

220 lines
6.2 KiB
Rust
Raw Normal View History

2024-01-30 17:42:55 -08:00
use crate::error::Result;
use crate::sys::XEN_PAGE_SHIFT;
2024-01-30 17:42:55 -08:00
use crate::Error;
2024-01-15 18:50:12 -08:00
use libc::munmap;
use log::debug;
2024-04-02 00:56:18 +00:00
use nix::errno::Errno;
2024-01-15 18:50:12 -08:00
use std::ffi::c_void;
use std::slice;
2024-03-10 00:36:23 -08:00
use xencall::sys::MmapEntry;
use xencall::XenCall;
/// A contiguous run of guest pages currently mapped into this process.
#[derive(Debug, Clone)]
pub struct PhysicalPage {
    // First guest page frame number covered by this mapping.
    pfn: u64,
    // Virtual address of the start of the mapping, stored as a u64.
    pub ptr: u64,
    // Number of consecutive pages covered by this mapping.
    count: u64,
}
/// Tracks the memory mappings of a Xen domain's physical pages within this process.
pub struct PhysicalPages {
    // Shift used to convert page counts/offsets into byte sizes.
    page_shift: u64,
    // Id of the domain whose memory is being mapped.
    domid: u32,
    // Optional pfn-to-mfn translation table; empty when no translation is loaded.
    pub p2m: Vec<u64>,
    // Hypercall interface used to create the mappings.
    call: XenCall,
    // All mappings created so far.
    pages: Vec<PhysicalPage>,
}
impl PhysicalPages {
pub fn new(call: XenCall, domid: u32, page_shift: u64) -> PhysicalPages {
PhysicalPages {
page_shift,
domid,
p2m: Vec::new(),
call,
pages: Vec::new(),
}
}
pub fn load_p2m(&mut self, p2m: Vec<u64>) {
self.p2m = p2m;
}
pub fn p2m_size(&mut self) -> u64 {
self.p2m.len() as u64
}
2024-04-02 00:56:18 +00:00
pub async fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
for page in &self.pages {
2024-01-10 19:18:48 -08:00
if pfn >= page.pfn + page.count {
continue;
}
if count > 0 {
if (pfn + count) <= page.pfn {
continue;
}
2024-01-10 19:18:48 -08:00
if pfn < page.pfn || (pfn + count) > page.pfn + page.count {
2024-03-21 21:31:10 +00:00
return Err(Error::MemorySetupFailed("pfn is out of range"));
}
} else {
if pfn < page.pfn {
continue;
}
2024-01-10 19:18:48 -08:00
if pfn >= page.pfn + page.count {
continue;
}
}
return Ok(page.ptr + ((pfn - page.pfn) << self.page_shift));
}
if count == 0 {
2024-03-21 21:31:10 +00:00
return Err(Error::MemorySetupFailed("page count is zero"));
}
2024-04-02 00:56:18 +00:00
self.pfn_alloc(pfn, count).await
}
2024-04-02 00:56:18 +00:00
async fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
let mut entries = vec![MmapEntry::default(); count as usize];
2024-01-15 16:25:06 -08:00
for (i, entry) in entries.iter_mut().enumerate() {
if !self.p2m.is_empty() {
entry.mfn = self.p2m[pfn as usize + i];
} else {
entry.mfn = pfn + i as u64;
}
}
let chunk_size = 1 << XEN_PAGE_SHIFT;
let num_per_entry = chunk_size >> XEN_PAGE_SHIFT;
2024-01-15 16:25:06 -08:00
let num = num_per_entry * count as usize;
let mut pfns = vec![u64::MAX; num];
for i in 0..count as usize {
for j in 0..num_per_entry {
pfns[i * num_per_entry + j] = entries[i].mfn + j as u64;
}
}
2024-01-15 16:25:06 -08:00
let actual_mmap_len = (num as u64) << XEN_PAGE_SHIFT;
let addr = self
.call
2024-01-15 16:25:06 -08:00
.mmap(0, actual_mmap_len)
2024-04-02 00:56:18 +00:00
.await
2024-01-30 18:02:32 -08:00
.ok_or(Error::MmapFailed)?;
2024-01-16 19:25:38 -08:00
debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
2024-04-02 00:56:18 +00:00
let result = self
.call
.mmap_batch(self.domid, num as u64, addr, pfns)
.await?;
2024-01-15 16:25:06 -08:00
if result != 0 {
2024-01-30 18:02:32 -08:00
return Err(Error::MmapFailed);
2024-01-15 16:25:06 -08:00
}
let page = PhysicalPage {
pfn,
ptr: addr,
2024-01-10 19:18:48 -08:00
count,
};
debug!(
"alloc_pfn {:#x}+{:#x} at {:#x}",
page.pfn, page.count, page.ptr
);
self.pages.push(page);
Ok(addr)
}
2024-01-15 18:50:12 -08:00
2024-04-02 00:56:18 +00:00
pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
let count = (size >> XEN_PAGE_SHIFT) as usize;
let mut entries = vec![MmapEntry::default(); count];
for (i, entry) in entries.iter_mut().enumerate() {
entry.mfn = mfn + i as u64;
}
let chunk_size = 1 << XEN_PAGE_SHIFT;
let num_per_entry = chunk_size >> XEN_PAGE_SHIFT;
let num = num_per_entry * count;
2024-01-16 23:07:34 -08:00
let mut pfns = vec![u64::MAX; num];
for i in 0..count {
for j in 0..num_per_entry {
pfns[i * num_per_entry + j] = entries[i].mfn + j as u64;
}
2024-01-16 23:07:34 -08:00
}
let actual_mmap_len = (num as u64) << XEN_PAGE_SHIFT;
let addr = self
.call
.mmap(0, actual_mmap_len)
2024-04-02 00:56:18 +00:00
.await
2024-01-30 18:02:32 -08:00
.ok_or(Error::MmapFailed)?;
2024-01-16 23:07:34 -08:00
debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
2024-04-02 00:56:18 +00:00
let result = self
.call
.mmap_batch(self.domid, num as u64, addr, pfns)
.await?;
2024-01-16 23:07:34 -08:00
if result != 0 {
2024-01-30 18:02:32 -08:00
return Err(Error::MmapFailed);
2024-01-16 23:07:34 -08:00
}
let page = PhysicalPage {
pfn: mfn,
2024-01-16 23:07:34 -08:00
ptr: addr,
count: count as u64,
2024-01-16 23:07:34 -08:00
};
debug!(
"alloc_mfn {:#x}+{:#x} at {:#x}",
page.pfn, page.count, page.ptr
);
self.pages.push(page);
Ok(addr)
}
pub async fn clear_pages(&mut self, pfn: u64, count: u64) -> Result<()> {
let ptr = self.pfn_to_ptr(pfn, count).await?;
let slice = unsafe {
slice::from_raw_parts_mut(ptr as *mut u8, (count * (1 << self.page_shift)) as usize)
};
slice.fill(0);
Ok(())
}
2024-01-30 17:42:55 -08:00
pub fn unmap_all(&mut self) -> Result<()> {
for page in &self.pages {
unsafe {
let err = munmap(
page.ptr as *mut c_void,
(page.count << self.page_shift) as usize,
);
if err != 0 {
2024-04-02 00:56:18 +00:00
return Err(Error::UnmapFailed(Errno::from_raw(err)));
}
}
}
self.pages.clear();
Ok(())
}
2024-01-30 17:42:55 -08:00
pub fn unmap(&mut self, pfn: u64) -> Result<()> {
2024-01-16 23:07:34 -08:00
let page = self.pages.iter().enumerate().find(|(_, x)| x.pfn == pfn);
2024-01-15 18:50:12 -08:00
if page.is_none() {
2024-03-21 21:31:10 +00:00
return Err(Error::MemorySetupFailed("cannot unmap missing page"));
2024-01-16 19:25:38 -08:00
}
2024-01-16 23:07:34 -08:00
let (i, page) = page.unwrap();
2024-01-16 19:25:38 -08:00
unsafe {
let err = munmap(
page.ptr as *mut c_void,
(page.count << self.page_shift) as usize,
2024-01-16 19:25:38 -08:00
);
debug!(
"unmapped {:#x} foreign bytes at {:#x}",
(page.count << self.page_shift) as usize,
2024-01-16 19:25:38 -08:00
page.ptr
);
if err != 0 {
2024-04-02 00:56:18 +00:00
return Err(Error::UnmapFailed(Errno::from_raw(err)));
2024-01-16 19:25:38 -08:00
}
2024-01-16 23:07:34 -08:00
self.pages.remove(i);
2024-01-15 18:50:12 -08:00
}
Ok(())
}
}