feat: utilize vfs for assembling oci images

Alex Zenla 2024-04-15 10:30:36 +00:00
parent fdc8f4d634
commit 5169a97d43
GPG Key ID: 067B238899B51269 (no known key found for this signature in database)
7 changed files with 448 additions and 287 deletions
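Taken together, the diffs below replace the directory-based assembly path with an in-memory virtual filesystem: OciImageAssembler records every tar entry in a VfsTree (unpacking only regular files to a scratch directory), whiteouts are applied against that tree, and the packer backends stream the tree back out as a tar archive into `mksquashfs -tar` / `mkfs.erofs --tar=-` instead of walking an unpacked image directory. The backhand-based squashfs writer and the runtime backend detection are removed. A minimal sketch of the resulting call flow, assuming the types and signatures introduced below (the `krata_oci` crate path is hypothetical):

```rust
use std::{path::Path, sync::Arc};

use anyhow::Result;

// Hypothetical crate paths; the real modules live under crates/oci/src/.
use krata_oci::assemble::OciImageAssembled;
use krata_oci::packer::OciPackedFormat;
use krata_oci::progress::OciBoundProgress;
use krata_oci::vfs::VfsTree;

async fn pack_image(
    assembled: &OciImageAssembled,
    progress: OciBoundProgress,
    target: &Path,
) -> Result<()> {
    // The assembler now hands back an Arc<VfsTree> instead of an unpacked root dir.
    let vfs: Arc<VfsTree> = assembled.vfs.clone();
    // After this change each packed format maps to exactly one external tool backend.
    let packer = OciPackedFormat::Squashfs.backend().create();
    // The backend streams vfs.write_to_tar(..) into the tool's stdin and waits on it.
    packer.pack(progress, vfs, target).await
}
```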

View File

@ -37,7 +37,7 @@ pub async fn pull_interactive_progress(
if progresses.is_empty() && !oci.layers.is_empty() {
for layer in &oci.layers {
let bar = ProgressBar::new(layer.total);
bar.set_style(ProgressStyle::with_template("{msg} {wide_bar}").unwrap());
bar.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
progresses.insert(layer.id.clone(), bar.clone());
multi_progress.add(bar);
}
@ -98,7 +98,7 @@ pub async fn pull_interactive_progress(
if progresses.is_empty() {
let progress = ProgressBar::new(100);
progress.set_message("packing ");
progress.set_style(ProgressStyle::with_template("{msg} {wide_bar}").unwrap());
progress.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
progresses.insert("packing".to_string(), progress);
}
let Some(progress) = progresses.get("packing") else {
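For reference on the template tweak above: in indicatif, `{wide_bar}` stretches the bar to fill the remaining terminal width, while `{bar}` renders a fixed-width bar next to the message. A minimal sketch of building such a bar, assuming indicatif 0.17's template API:

```rust
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};

fn add_layer_bar(multi: &MultiProgress, total: u64, msg: &'static str) -> ProgressBar {
    let bar = ProgressBar::new(total);
    bar.set_message(msg);
    // "{bar}" keeps a fixed-width bar; "{wide_bar}" would expand to the full line.
    bar.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
    multi.add(bar.clone());
    bar
}
```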

View File

@ -1,11 +1,12 @@
use crate::fetch::{OciImageFetcher, OciImageLayer, OciResolvedImage};
use crate::progress::OciBoundProgress;
use crate::vfs::{VfsNode, VfsTree};
use anyhow::{anyhow, Result};
use log::{debug, trace};
use log::{debug, trace, warn};
use oci_spec::image::{ImageConfiguration, ImageManifest};
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
use tokio::fs;
use tokio::io::AsyncRead;
use tokio_stream::StreamExt;
@ -14,9 +15,9 @@ use uuid::Uuid;
pub struct OciImageAssembled {
pub digest: String,
pub path: PathBuf,
pub manifest: ImageManifest,
pub config: ImageConfiguration,
pub vfs: Arc<VfsTree>,
pub tmp_dir: Option<PathBuf>,
}
@ -35,7 +36,7 @@ pub struct OciImageAssembler {
resolved: OciResolvedImage,
progress: OciBoundProgress,
work_dir: PathBuf,
target_dir: PathBuf,
disk_dir: PathBuf,
tmp_dir: Option<PathBuf>,
}
@ -45,9 +46,9 @@ impl OciImageAssembler {
resolved: OciResolvedImage,
progress: OciBoundProgress,
work_dir: Option<PathBuf>,
target_dir: Option<PathBuf>,
disk_dir: Option<PathBuf>,
) -> Result<OciImageAssembler> {
let tmp_dir = if work_dir.is_none() || target_dir.is_none() {
let tmp_dir = if work_dir.is_none() || disk_dir.is_none() {
let mut tmp_dir = std::env::temp_dir().clone();
tmp_dir.push(format!("oci-assemble-{}", Uuid::new_v4()));
Some(tmp_dir)
@ -65,7 +66,7 @@ impl OciImageAssembler {
tmp_dir
};
let target_dir = if let Some(target_dir) = target_dir {
let target_dir = if let Some(target_dir) = disk_dir {
target_dir
} else {
let mut tmp_dir = tmp_dir
@ -83,7 +84,7 @@ impl OciImageAssembler {
resolved,
progress,
work_dir,
target_dir,
disk_dir: target_dir,
tmp_dir,
})
}
@ -101,6 +102,7 @@ impl OciImageAssembler {
.downloader
.download(self.resolved.clone(), layer_dir)
.await?;
let mut vfs = VfsTree::new();
for layer in &local.layers {
debug!(
"process layer digest={} compression={:?}",
@ -111,7 +113,7 @@ impl OciImageAssembler {
progress.extracting_layer(&layer.digest, 0, 1);
})
.await;
let (whiteouts, count) = self.process_layer_whiteout(layer).await?;
let (whiteouts, count) = self.process_layer_whiteout(&mut vfs, layer).await?;
self.progress
.update(|progress| {
progress.extracting_layer(&layer.digest, 0, count);
@ -154,7 +156,9 @@ impl OciImageAssembler {
if name.starts_with(".wh.") {
continue;
} else {
self.process_write_entry(&mut entry, layer).await?;
vfs.insert_tar_entry(&entry)?;
self.process_write_entry(&mut vfs, &mut entry, layer)
.await?;
}
}
self.progress
@ -163,22 +167,25 @@ impl OciImageAssembler {
})
.await;
}
for layer in &local.layers {
if layer.path.exists() {
fs::remove_file(&layer.path).await?;
}
}
Ok(OciImageAssembled {
vfs: Arc::new(vfs),
digest: self.resolved.digest,
path: self.target_dir,
manifest: self.resolved.manifest,
config: local.config,
tmp_dir: self.tmp_dir,
})
}
async fn process_layer_whiteout(&self, layer: &OciImageLayer) -> Result<(Vec<String>, usize)> {
async fn process_layer_whiteout(
&self,
vfs: &mut VfsTree,
layer: &OciImageLayer,
) -> Result<(Vec<String>, usize)> {
let mut whiteouts = Vec::new();
let mut archive = layer.archive().await?;
let mut entries = archive.entries()?;
@ -195,7 +202,9 @@ impl OciImageAssembler {
};
if name.starts_with(".wh.") {
let path = self.process_whiteout_entry(&entry, name, layer).await?;
let path = self
.process_whiteout_entry(vfs, &entry, name, layer)
.await?;
if let Some(path) = path {
whiteouts.push(path);
}
@ -206,13 +215,12 @@ impl OciImageAssembler {
async fn process_whiteout_entry(
&self,
vfs: &mut VfsTree,
entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
name: &str,
layer: &OciImageLayer,
) -> Result<Option<String>> {
let path = entry.path()?;
let mut dst = self.check_safe_entry(path.clone())?;
dst.pop();
let mut path = path.to_path_buf();
path.pop();
@ -220,9 +228,7 @@ impl OciImageAssembler {
if !opaque {
let file = &name[4..];
dst.push(file);
path.push(file);
self.check_safe_path(&dst)?;
}
trace!("whiteout entry layer={} path={:?}", &layer.digest, path,);
@ -232,37 +238,14 @@ impl OciImageAssembler {
.ok_or(anyhow!("unable to convert path to string"))?
.to_string();
if opaque {
if dst.is_dir() {
let mut reader = fs::read_dir(dst).await?;
while let Some(entry) = reader.next_entry().await? {
let path = entry.path();
if path.is_symlink() || path.is_file() {
fs::remove_file(&path).await?;
} else if path.is_dir() {
fs::remove_dir_all(&path).await?;
} else {
return Err(anyhow!("opaque whiteout entry did not exist"));
}
}
} else {
debug!(
"whiteout opaque entry missing locally layer={} path={:?} local={:?}",
&layer.digest,
entry.path()?,
dst,
);
}
} else if dst.is_file() || dst.is_symlink() {
fs::remove_file(&dst).await?;
} else if dst.is_dir() {
fs::remove_dir_all(&dst).await?;
let removed = vfs.root.remove(&path);
if let Some(removed) = removed {
delete_disk_paths(removed).await?;
} else {
debug!(
"whiteout entry missing locally layer={} path={:?} local={:?}",
trace!(
"whiteout entry layer={} path={:?} did not exist",
&layer.digest,
entry.path()?,
dst,
path
);
}
Ok(if opaque { None } else { Some(whiteout) })
@ -270,52 +253,39 @@ impl OciImageAssembler {
async fn process_write_entry(
&self,
vfs: &mut VfsTree,
entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
layer: &OciImageLayer,
) -> Result<()> {
let uid = entry.header().uid()?;
let gid = entry.header().gid()?;
if !entry.header().entry_type().is_file() {
return Ok(());
}
trace!(
"unpack entry layer={} path={:?} type={:?} uid={} gid={}",
"unpack entry layer={} path={:?} type={:?}",
&layer.digest,
entry.path()?,
entry.header().entry_type(),
uid,
gid,
);
entry.set_preserve_mtime(true);
entry.set_preserve_permissions(true);
entry.set_unpack_xattrs(true);
if let Some(path) = entry.unpack_in(&self.target_dir).await? {
if !path.is_symlink() {
std::os::unix::fs::chown(path, Some(uid as u32), Some(gid as u32))?;
}
}
Ok(())
}
fn check_safe_entry(&self, path: Cow<Path>) -> Result<PathBuf> {
let mut dst = self.target_dir.to_path_buf();
dst.push(path);
if let Some(name) = dst.file_name() {
if let Some(name) = name.to_str() {
if name.starts_with(".wh.") {
let copy = dst.clone();
dst.pop();
self.check_safe_path(&dst)?;
return Ok(copy);
}
}
}
self.check_safe_path(&dst)?;
Ok(dst)
}
fn check_safe_path(&self, dst: &Path) -> Result<()> {
let resolved = path_clean::clean(dst);
if !resolved.starts_with(&self.target_dir) {
return Err(anyhow!("layer attempts to work outside image dir"));
}
let path = entry
.unpack_in(&self.disk_dir)
.await?
.ok_or(anyhow!("unpack did not return a path"))?;
vfs.set_disk_path(&entry.path()?, &path)?;
Ok(())
}
}
async fn delete_disk_paths(node: VfsNode) -> Result<()> {
let mut queue = vec![node];
while !queue.is_empty() {
let node = queue.remove(0);
if let Some(ref disk_path) = node.disk_path {
if !disk_path.exists() {
warn!("disk path {:?} does not exist", disk_path);
}
fs::remove_file(disk_path).await?;
}
queue.extend_from_slice(&node.children);
}
Ok(())
}
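For context on the whiteout path above: instead of deleting files under a target directory, the assembler now removes nodes from the VfsTree and only touches disk for the scratch copies behind them (via delete_disk_paths). A rough sketch of how a whiteout entry maps onto the tree, assuming it runs inside this crate and that `.wh..wh..opq` is the opaque marker (per the OCI layer spec):

```rust
use std::path::Path;

use crate::vfs::{VfsNode, VfsTree};

/// Resolve which VFS path a whiteout entry hides and detach it from the tree.
/// `entry_path` is the tar entry path, `name` its file name (starts with ".wh.").
fn apply_whiteout(vfs: &mut VfsTree, entry_path: &Path, name: &str) -> Option<VfsNode> {
    let opaque = name == ".wh..wh..opq";
    let mut path = entry_path.to_path_buf();
    path.pop(); // drop the ".wh.*" marker itself
    if !opaque {
        path.push(&name[4..]); // ".wh.curl" hides "curl" in the same directory
    }
    // Removing the node detaches it and all of its children; the assembler then
    // deletes any disk_path scratch files backing the removed subtree.
    vfs.root.remove(&path)
}
```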

View File

@ -4,3 +4,4 @@ pub mod name;
pub mod packer;
pub mod progress;
pub mod registry;
pub mod vfs;

View File

@ -1,143 +1,16 @@
use std::{
fs::File,
io::{BufWriter, ErrorKind, Read},
os::unix::fs::{FileTypeExt, MetadataExt, PermissionsExt},
path::{Path, PathBuf},
process::{Command, Stdio},
};
use crate::progress::{OciBoundProgress, OciProgressPhase};
use anyhow::{anyhow, Result};
use backhand::{compression::Compressor, FilesystemCompressor, FilesystemWriter, NodeHeader};
use log::{trace, warn};
use walkdir::WalkDir;
use std::{path::Path, process::Stdio, sync::Arc};
use super::OciPackedFormat;
pub struct OciPackerBackhand {}
impl OciPackerBackend for OciPackerBackhand {
fn pack(&self, progress: OciBoundProgress, directory: &Path, file: &Path) -> Result<()> {
progress.update_blocking(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
});
let mut writer = FilesystemWriter::default();
writer.set_compressor(FilesystemCompressor::new(Compressor::Gzip, None)?);
let walk = WalkDir::new(directory).follow_links(false);
for entry in walk {
let entry = entry?;
let rel = entry
.path()
.strip_prefix(directory)?
.to_str()
.ok_or_else(|| anyhow!("failed to strip prefix of tmpdir"))?;
let rel = format!("/{}", rel);
trace!("squash write {}", rel);
let typ = entry.file_type();
let metadata = std::fs::symlink_metadata(entry.path())?;
let uid = metadata.uid();
let gid = metadata.gid();
let mode = metadata.permissions().mode();
let mtime = metadata.mtime();
if rel == "/" {
writer.set_root_uid(uid);
writer.set_root_gid(gid);
writer.set_root_mode(mode as u16);
continue;
}
let header = NodeHeader {
permissions: mode as u16,
uid,
gid,
mtime: mtime as u32,
};
if typ.is_symlink() {
let symlink = std::fs::read_link(entry.path())?;
let symlink = symlink
.to_str()
.ok_or_else(|| anyhow!("failed to read symlink"))?;
writer.push_symlink(symlink, rel, header)?;
} else if typ.is_dir() {
writer.push_dir(rel, header)?;
} else if typ.is_file() {
writer.push_file(ConsumingFileReader::new(entry.path()), rel, header)?;
} else if typ.is_block_device() {
let device = metadata.dev();
writer.push_block_device(device as u32, rel, header)?;
} else if typ.is_char_device() {
let device = metadata.dev();
writer.push_char_device(device as u32, rel, header)?;
} else if typ.is_fifo() {
writer.push_fifo(rel, header)?;
} else if typ.is_socket() {
writer.push_socket(rel, header)?;
} else {
return Err(anyhow!("invalid file type"));
}
}
let squash_file_path = file
.to_str()
.ok_or_else(|| anyhow!("failed to convert squashfs string"))?;
let file = File::create(file)?;
let mut bufwrite = BufWriter::new(file);
trace!("squash generate: {}", squash_file_path);
writer.write(&mut bufwrite)?;
progress.update_blocking(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
});
Ok(())
}
}
struct ConsumingFileReader {
path: PathBuf,
file: Option<File>,
}
impl ConsumingFileReader {
fn new(path: &Path) -> ConsumingFileReader {
ConsumingFileReader {
path: path.to_path_buf(),
file: None,
}
}
}
impl Read for ConsumingFileReader {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
if self.file.is_none() {
self.file = Some(File::open(&self.path)?);
}
let Some(ref mut file) = self.file else {
return Err(std::io::Error::new(
ErrorKind::NotFound,
"file was not opened",
));
};
file.read(buf)
}
}
impl Drop for ConsumingFileReader {
fn drop(&mut self) {
let file = self.file.take();
drop(file);
if let Err(error) = std::fs::remove_file(&self.path) {
warn!("failed to delete consuming file {:?}: {}", self.path, error);
}
}
}
use crate::{
progress::{OciBoundProgress, OciProgressPhase},
vfs::VfsTree,
};
use anyhow::{anyhow, Result};
use log::warn;
use tokio::{pin, process::Command, select};
#[derive(Debug, Clone, Copy)]
pub enum OciPackerBackendType {
Backhand,
MkSquashfs,
MkfsErofs,
}
@ -145,7 +18,6 @@ pub enum OciPackerBackendType {
impl OciPackerBackendType {
pub fn format(&self) -> OciPackedFormat {
match self {
OciPackerBackendType::Backhand => OciPackedFormat::Squashfs,
OciPackerBackendType::MkSquashfs => OciPackedFormat::Squashfs,
OciPackerBackendType::MkfsErofs => OciPackedFormat::Erofs,
}
@ -153,9 +25,6 @@ impl OciPackerBackendType {
pub fn create(&self) -> Box<dyn OciPackerBackend> {
match self {
OciPackerBackendType::Backhand => {
Box::new(OciPackerBackhand {}) as Box<dyn OciPackerBackend>
}
OciPackerBackendType::MkSquashfs => {
Box::new(OciPackerMkSquashfs {}) as Box<dyn OciPackerBackend>
}
@ -166,40 +35,88 @@ impl OciPackerBackendType {
}
}
pub trait OciPackerBackend {
fn pack(&self, progress: OciBoundProgress, directory: &Path, file: &Path) -> Result<()>;
#[async_trait::async_trait]
pub trait OciPackerBackend: Send + Sync {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()>;
}
pub struct OciPackerMkSquashfs {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerMkSquashfs {
fn pack(&self, progress: OciBoundProgress, directory: &Path, file: &Path) -> Result<()> {
progress.update_blocking(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
});
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
})
.await;
let mut child = Command::new("mksquashfs")
.arg(directory)
.arg("-")
.arg(file)
.arg("-comp")
.arg("gzip")
.stdin(Stdio::null())
.arg("-tar")
.stdin(Stdio::piped())
.stderr(Stdio::null())
.stdout(Stdio::null())
.spawn()?;
let status = child.wait()?;
let stdin = child
.stdin
.take()
.ok_or(anyhow!("unable to acquire stdin stream"))?;
let mut writer = Some(tokio::task::spawn(async move {
if let Err(error) = vfs.write_to_tar(stdin).await {
warn!("failed to write tar: {}", error);
return Err(error);
}
Ok(())
}));
let wait = child.wait();
pin!(wait);
let status_result = loop {
if let Some(inner) = writer.as_mut() {
select! {
x = inner => {
writer = None;
match x {
Ok(_) => {},
Err(error) => {
return Err(error.into());
}
}
},
status = &mut wait => {
break status;
}
};
} else {
select! {
status = &mut wait => {
break status;
}
};
}
};
if let Some(writer) = writer {
writer.await??;
}
let status = status_result?;
if !status.success() {
Err(anyhow!(
"mksquashfs failed with exit code: {}",
status.code().unwrap()
))
} else {
progress.update_blocking(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
});
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
})
.await;
Ok(())
}
}
@ -207,34 +124,77 @@ impl OciPackerBackend for OciPackerMkSquashfs {
pub struct OciPackerMkfsErofs {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerMkfsErofs {
fn pack(&self, progress: OciBoundProgress, directory: &Path, file: &Path) -> Result<()> {
progress.update_blocking(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
});
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, path: &Path) -> Result<()> {
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
})
.await;
let mut child = Command::new("mkfs.erofs")
.arg("-L")
.arg("root")
.arg(file)
.arg(directory)
.stdin(Stdio::null())
.arg("--tar=-")
.arg(path)
.stdin(Stdio::piped())
.stderr(Stdio::null())
.stdout(Stdio::null())
.spawn()?;
let status = child.wait()?;
let stdin = child
.stdin
.take()
.ok_or(anyhow!("unable to acquire stdin stream"))?;
let mut writer = Some(tokio::task::spawn(
async move { vfs.write_to_tar(stdin).await },
));
let wait = child.wait();
pin!(wait);
let status_result = loop {
if let Some(inner) = writer.as_mut() {
select! {
x = inner => {
match x {
Ok(_) => {
writer = None;
},
Err(error) => {
return Err(error.into());
}
}
},
status = &mut wait => {
break status;
}
};
} else {
select! {
status = &mut wait => {
break status;
}
};
}
};
if let Some(writer) = writer {
writer.await??;
}
let status = status_result?;
if !status.success() {
Err(anyhow!(
"mkfs.erofs failed with exit code: {}",
status.code().unwrap()
))
} else {
progress.update_blocking(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
});
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
})
.await;
Ok(())
}
}
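Both backends above now share the same shape: the external tool is started in tar-consuming mode, the VFS is serialized onto its stdin from a spawned task, and a select! loop drives the writer and `child.wait()` together so neither side can stall the other. A stripped-down sketch of that pattern, assuming it runs inside this crate (error handling and the select! interleaving are simplified relative to the diff):

```rust
use std::{process::Stdio, sync::Arc};

use anyhow::{anyhow, Result};
use tokio::process::Command;

use crate::vfs::VfsTree;

async fn pack_via_tool(tool: &str, args: &[&str], vfs: Arc<VfsTree>) -> Result<()> {
    let mut child = Command::new(tool)
        .args(args) // e.g. ["-", "image.squashfs", "-tar"] or ["--tar=-", "image.erofs"]
        .stdin(Stdio::piped())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn()?;
    let stdin = child
        .stdin
        .take()
        .ok_or(anyhow!("unable to acquire stdin stream"))?;
    // Serialize the tree as a tar archive on a separate task so the child can
    // consume it while we wait; the real code select!s over both futures.
    let writer = tokio::task::spawn(async move { vfs.write_to_tar(stdin).await });
    let status = child.wait().await?;
    writer.await??;
    if status.success() {
        Ok(())
    } else {
        Err(anyhow!("{} failed with exit code {:?}", tool, status.code()))
    }
}
```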

View File

@ -1,9 +1,7 @@
use std::path::PathBuf;
use self::backend::OciPackerBackendType;
use oci_spec::image::{ImageConfiguration, ImageManifest};
use std::{
path::PathBuf,
process::{Command, Stdio},
};
pub mod backend;
pub mod cache;
@ -24,27 +22,9 @@ impl OciPackedFormat {
}
}
pub fn detect_best_backend(&self) -> OciPackerBackendType {
pub fn backend(&self) -> OciPackerBackendType {
match self {
OciPackedFormat::Squashfs => {
let status = Command::new("mksquashfs")
.arg("-version")
.stdin(Stdio::null())
.stderr(Stdio::null())
.stdout(Stdio::null())
.status()
.ok();
let Some(code) = status.and_then(|x| x.code()) else {
return OciPackerBackendType::Backhand;
};
if code == 0 {
OciPackerBackendType::MkSquashfs
} else {
OciPackerBackendType::Backhand
}
}
OciPackedFormat::Squashfs => OciPackerBackendType::MkSquashfs,
OciPackedFormat::Erofs => OciPackerBackendType::MkfsErofs,
}
}

View File

@ -63,12 +63,11 @@ impl OciPackerService {
.ok_or(anyhow!("tmp_dir was missing when packing image"))?;
file.push("image.pack");
let target = file.clone();
let directory = assembled.path.clone();
tokio::task::spawn_blocking(move || {
let packer = format.detect_best_backend().create();
packer.pack(progress, &directory, &target)
})
.await??;
let packer = format.backend().create();
packer
.pack(progress, assembled.vfs.clone(), &target)
.await?;
let packed = OciImagePacked::new(
assembled.digest.clone(),
file,

crates/oci/src/vfs.rs (new file, 251 lines added)
View File

@ -0,0 +1,251 @@
use std::path::{Path, PathBuf};
use anyhow::{anyhow, Result};
use tokio::{
fs::File,
io::{AsyncRead, AsyncWrite, AsyncWriteExt},
};
use tokio_tar::{Builder, Entry, EntryType, Header};
#[derive(Clone, Debug)]
pub enum VfsNodeType {
Directory,
RegularFile,
Symlink,
Hardlink,
Fifo,
CharDevice,
BlockDevice,
}
#[derive(Clone, Debug)]
pub struct VfsNode {
pub name: String,
pub size: u64,
pub children: Vec<VfsNode>,
pub typ: VfsNodeType,
pub uid: u64,
pub gid: u64,
pub link_name: Option<String>,
pub mode: u32,
pub mtime: u64,
pub dev_major: Option<u32>,
pub dev_minor: Option<u32>,
pub disk_path: Option<PathBuf>,
}
impl VfsNode {
pub fn from<X: AsyncRead + Unpin>(entry: &Entry<X>) -> Result<VfsNode> {
let header = entry.header();
let name = entry
.path()?
.file_name()
.ok_or(anyhow!("unable to get file name for entry"))?
.to_string_lossy()
.to_string();
let typ = header.entry_type();
let vtype = if typ.is_symlink() {
VfsNodeType::Symlink
} else if typ.is_hard_link() {
VfsNodeType::Hardlink
} else if typ.is_dir() {
VfsNodeType::Directory
} else if typ.is_fifo() {
VfsNodeType::Fifo
} else if typ.is_block_special() {
VfsNodeType::BlockDevice
} else if typ.is_character_special() {
VfsNodeType::CharDevice
} else if typ.is_file() {
VfsNodeType::RegularFile
} else {
return Err(anyhow!("unable to determine vfs type for entry"));
};
Ok(VfsNode {
name,
size: header.size()?,
children: vec![],
typ: vtype,
uid: header.uid()?,
gid: header.gid()?,
link_name: header.link_name()?.map(|x| x.to_string_lossy().to_string()),
mode: header.mode()?,
mtime: header.mtime()?,
dev_major: header.device_major()?,
dev_minor: header.device_minor()?,
disk_path: None,
})
}
pub fn lookup(&self, path: &Path) -> Option<&VfsNode> {
let mut node = self;
for part in path {
node = node
.children
.iter()
.find(|child| child.name == part.to_string_lossy())?;
}
Some(node)
}
pub fn lookup_mut(&mut self, path: &Path) -> Option<&mut VfsNode> {
let mut node = self;
for part in path {
node = node
.children
.iter_mut()
.find(|child| child.name == part.to_string_lossy())?;
}
Some(node)
}
pub fn remove(&mut self, path: &Path) -> Option<VfsNode> {
let parent = path.parent()?;
let node = self.lookup_mut(parent)?;
let file_name = path.file_name()?;
let file_name = file_name.to_string_lossy();
let position = node
.children
.iter()
.position(|child| file_name == child.name)?;
Some(node.children.remove(position))
}
pub fn create_tar_header(&self) -> Result<Header> {
let mut header = Header::new_ustar();
header.set_entry_type(match self.typ {
VfsNodeType::Directory => EntryType::Directory,
VfsNodeType::CharDevice => EntryType::Char,
VfsNodeType::BlockDevice => EntryType::Block,
VfsNodeType::Fifo => EntryType::Fifo,
VfsNodeType::Hardlink => EntryType::Link,
VfsNodeType::Symlink => EntryType::Symlink,
VfsNodeType::RegularFile => EntryType::Regular,
});
header.set_uid(self.uid);
header.set_gid(self.gid);
if let Some(device_major) = self.dev_major {
header.set_device_major(device_major)?;
}
if let Some(device_minor) = self.dev_minor {
header.set_device_minor(device_minor)?;
}
header.set_mtime(self.mtime);
header.set_mode(self.mode);
if let Some(link_name) = self.link_name.as_ref() {
header.set_link_name(&PathBuf::from(link_name))?;
}
header.set_size(self.size);
Ok(header)
}
pub async fn write_to_tar<W: AsyncWrite + Unpin + Send>(
&self,
path: &Path,
builder: &mut Builder<W>,
) -> Result<()> {
let mut header = self.create_tar_header()?;
header.set_path(path)?;
header.set_cksum();
if let Some(disk_path) = self.disk_path.as_ref() {
builder
.append(&header, File::open(disk_path).await?)
.await?;
} else {
builder.append(&header, &[] as &[u8]).await?;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct VfsTree {
pub root: VfsNode,
}
impl Default for VfsTree {
fn default() -> Self {
Self::new()
}
}
impl VfsTree {
pub fn new() -> VfsTree {
VfsTree {
root: VfsNode {
name: "".to_string(),
size: 0,
children: vec![],
typ: VfsNodeType::Directory,
uid: 0,
gid: 0,
link_name: None,
mode: 0,
mtime: 0,
dev_major: None,
dev_minor: None,
disk_path: None,
},
}
}
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<()> {
let meta = VfsNode::from(entry)?;
let path = entry.path()?.to_path_buf();
let parent = if let Some(parent) = path.parent() {
self.root.lookup_mut(parent)
} else {
Some(&mut self.root)
};
let Some(parent) = parent else {
return Err(anyhow!("unable to find parent of entry"));
};
parent.children.retain(|child| child.name != meta.name);
parent.children.push(meta);
Ok(())
}
pub fn set_disk_path(&mut self, path: &Path, disk_path: &Path) -> Result<()> {
let Some(node) = self.root.lookup_mut(path) else {
return Err(anyhow!(
"unable to find node {:?} to set disk path to",
path
));
};
node.disk_path = Some(disk_path.to_path_buf());
Ok(())
}
pub async fn write_to_tar<W: AsyncWrite + Unpin + Send + 'static>(
&self,
write: W,
) -> Result<()> {
let mut builder = Builder::new(write);
let mut queue = vec![(PathBuf::from(""), &self.root)];
while !queue.is_empty() {
let (mut path, node) = queue.remove(0);
if !node.name.is_empty() {
path.push(&node.name);
}
if path.components().count() != 0 {
node.write_to_tar(&path, &mut builder).await?;
}
for child in &node.children {
queue.push((path.clone(), child));
}
}
let mut write = builder.into_inner().await?;
write.flush().await?;
drop(write);
Ok(())
}
}
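The tree is populated and drained the same way the assembler and packer backends use it above: insert_tar_entry records metadata for every entry, set_disk_path links regular files to their unpacked scratch copies, and write_to_tar replays the whole tree as a tar stream. A small usage sketch under those assumptions, assuming it runs inside this crate:

```rust
use std::path::Path;

use anyhow::Result;
use tokio::fs::File;
use tokio_stream::StreamExt;
use tokio_tar::Archive;

use crate::vfs::VfsTree;

async fn tree_from_layer(layer_tar: &Path) -> Result<VfsTree> {
    let mut vfs = VfsTree::new();
    let mut archive = Archive::new(File::open(layer_tar).await?);
    let mut entries = archive.entries()?;
    while let Some(entry) = entries.next().await {
        let entry = entry?;
        // Record the entry's metadata (type, mode, uid/gid, link target, ...).
        vfs.insert_tar_entry(&entry)?;
        // For regular files the assembler additionally unpacks the entry to a
        // scratch directory and calls vfs.set_disk_path(&entry.path()?, &unpacked)
        // so write_to_tar can stream the file contents back out later.
    }
    Ok(vfs)
}
```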