feat: oci compliance work (#85)

* chore: rework oci crate to be more composable

* feat: image pull is now internally explicit

* feat: utilize vfs for assembling oci images

* feat: rework oci to preserve permissions via a vfs
This commit is contained in:
Alex Zenla
2024-04-15 10:24:14 -07:00
committed by GitHub
parent 24c71e9725
commit 89055ef77c
33 changed files with 1500 additions and 1164 deletions

View File

@ -3,13 +3,12 @@ use std::{env::args, path::PathBuf};
use anyhow::Result;
use env_logger::Env;
use krataoci::{
cache::ImageCache,
compiler::OciImageCompiler,
name::ImageName,
packer::OciPackerFormat,
packer::{service::OciPackerService, OciPackedFormat},
progress::{OciProgress, OciProgressContext},
registry::OciPlatform,
};
use tokio::{fs, sync::broadcast};
use tokio::{fs, sync::mpsc::channel};
#[tokio::main]
async fn main() -> Result<()> {
@ -23,16 +22,16 @@ async fn main() -> Result<()> {
fs::create_dir(&cache_dir).await?;
}
let cache = ImageCache::new(&cache_dir)?;
let (sender, mut receiver) = broadcast::channel::<OciProgress>(1000);
let (sender, mut receiver) = channel::<OciProgress>(100);
tokio::task::spawn(async move {
loop {
let Some(progress) = receiver.recv().await.ok() else {
break;
let mut progresses = Vec::new();
let _ = receiver.recv_many(&mut progresses, 100).await;
let Some(progress) = progresses.last() else {
continue;
};
println!("phase {:?}", progress.phase);
for (id, layer) in progress.layers {
for (id, layer) in &progress.layers {
println!(
"{} {:?} {} of {}",
id, layer.phase, layer.value, layer.total
@ -41,14 +40,14 @@ async fn main() -> Result<()> {
}
});
let context = OciProgressContext::new(sender);
let compiler = OciImageCompiler::new(&cache, seed, context)?;
let info = compiler
.compile(&image.to_string(), &image, OciPackerFormat::Squashfs)
let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current())?;
let packed = service
.request(image.clone(), OciPackedFormat::Squashfs, context)
.await?;
println!(
"generated squashfs of {} to {}",
image,
info.image.to_string_lossy()
packed.path.to_string_lossy()
);
Ok(())
}

239
crates/oci/src/assemble.rs Normal file
View File

@ -0,0 +1,239 @@
use crate::fetch::{OciImageFetcher, OciImageLayer, OciResolvedImage};
use crate::progress::OciBoundProgress;
use crate::vfs::{VfsNode, VfsTree};
use anyhow::{anyhow, Result};
use log::{debug, trace, warn};
use oci_spec::image::{ImageConfiguration, ImageManifest};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
use tokio::fs;
use tokio::io::AsyncRead;
use tokio_stream::StreamExt;
use tokio_tar::{Archive, Entry};
use uuid::Uuid;
/// The result of assembling an OCI image: the resolved digest, manifest and
/// configuration, plus a virtual filesystem view of the merged layers.
pub struct OciImageAssembled {
    pub digest: String,
    pub manifest: ImageManifest,
    pub config: ImageConfiguration,
    /// Merged virtual filesystem view of all image layers.
    pub vfs: Arc<VfsTree>,
    /// Temporary directory owned by this value; removed on drop when set.
    pub tmp_dir: Option<PathBuf>,
}
impl Drop for OciImageAssembled {
    /// Best-effort removal of the temporary assembly directory, if this
    /// instance owns one. The deletion runs on a spawned task because `Drop`
    /// cannot be async; failures are intentionally ignored.
    fn drop(&mut self) {
        let Some(tmp) = self.tmp_dir.take() else {
            return;
        };
        tokio::task::spawn(async move {
            let _ = fs::remove_dir_all(&tmp).await;
        });
    }
}
/// Drives download and extraction of a resolved OCI image into a VFS tree
/// plus on-disk file contents.
pub struct OciImageAssembler {
    downloader: OciImageFetcher,
    resolved: OciResolvedImage,
    progress: OciBoundProgress,
    /// Directory for intermediate state (layer downloads).
    work_dir: PathBuf,
    /// Directory that receives extracted regular-file contents.
    disk_dir: PathBuf,
    /// Set when this assembler allocated a temp dir that must be cleaned up.
    tmp_dir: Option<PathBuf>,
}
impl OciImageAssembler {
    /// Creates an assembler for `resolved`, fetching layers via `downloader`.
    ///
    /// `work_dir` holds intermediate layer downloads; `disk_dir` receives
    /// extracted file contents. When either is `None`, a fresh temporary
    /// directory (`oci-assemble-<uuid>`) backs the missing path(s) and is
    /// remembered in `tmp_dir` so the assembled result can delete it on drop.
    pub async fn new(
        downloader: OciImageFetcher,
        resolved: OciResolvedImage,
        progress: OciBoundProgress,
        work_dir: Option<PathBuf>,
        disk_dir: Option<PathBuf>,
    ) -> Result<OciImageAssembler> {
        // Only allocate a temp dir when at least one path was not provided.
        let tmp_dir = if work_dir.is_none() || disk_dir.is_none() {
            let mut tmp_dir = std::env::temp_dir().clone();
            tmp_dir.push(format!("oci-assemble-{}", Uuid::new_v4()));
            Some(tmp_dir)
        } else {
            None
        };
        let work_dir = if let Some(work_dir) = work_dir {
            work_dir
        } else {
            let mut tmp_dir = tmp_dir
                .clone()
                .ok_or(anyhow!("tmp_dir was not created when expected"))?;
            tmp_dir.push("work");
            tmp_dir
        };
        let target_dir = if let Some(target_dir) = disk_dir {
            target_dir
        } else {
            let mut tmp_dir = tmp_dir
                .clone()
                .ok_or(anyhow!("tmp_dir was not created when expected"))?;
            tmp_dir.push("image");
            tmp_dir
        };
        fs::create_dir_all(&work_dir).await?;
        fs::create_dir_all(&target_dir).await?;
        Ok(OciImageAssembler {
            downloader,
            resolved,
            progress,
            work_dir,
            disk_dir: target_dir,
            tmp_dir,
        })
    }

    /// Downloads and extracts all layers, producing the assembled image.
    /// Layer archives are staged under `<work_dir>/layer`.
    pub async fn assemble(self) -> Result<OciImageAssembled> {
        debug!("assemble");
        let mut layer_dir = self.work_dir.clone();
        layer_dir.push("layer");
        fs::create_dir_all(&layer_dir).await?;
        self.assemble_with(&layer_dir).await
    }

    /// Applies each downloaded layer in order: whiteout markers remove paths
    /// from the VFS (and their backing files), every other entry is recorded
    /// in the VFS, and regular files are additionally unpacked to `disk_dir`.
    /// Layer archives are deleted once all layers are processed.
    async fn assemble_with(self, layer_dir: &Path) -> Result<OciImageAssembled> {
        let local = self
            .downloader
            .download(self.resolved.clone(), layer_dir)
            .await?;
        let mut vfs = VfsTree::new();
        for layer in &local.layers {
            debug!(
                "process layer digest={} compression={:?}",
                &layer.digest, layer.compression,
            );
            self.progress
                .update(|progress| {
                    progress.extracting_layer(&layer.digest, 0, 1);
                })
                .await;
            debug!("process layer digest={}", &layer.digest,);
            let mut archive = layer.archive().await?;
            let mut entries = archive.entries()?;
            while let Some(entry) = entries.next().await {
                let mut entry = entry?;
                let path = entry.path()?;
                // Entries without a usable UTF-8 file name are skipped.
                let Some(name) = path.file_name() else {
                    continue;
                };
                let Some(name) = name.to_str() else {
                    continue;
                };
                // OCI whiteout markers (".wh.*") delete paths from lower layers.
                if name.starts_with(".wh.") {
                    self.process_whiteout_entry(&mut vfs, &entry, name, layer)
                        .await?;
                } else {
                    vfs.insert_tar_entry(&entry)?;
                    self.process_write_entry(&mut vfs, &mut entry, layer)
                        .await?;
                }
            }
            self.progress
                .update(|progress| {
                    progress.extracted_layer(&layer.digest);
                })
                .await;
        }
        // Downloaded layer archives are no longer needed once extracted.
        for layer in &local.layers {
            if layer.path.exists() {
                fs::remove_file(&layer.path).await?;
            }
        }
        Ok(OciImageAssembled {
            vfs: Arc::new(vfs),
            digest: self.resolved.digest,
            manifest: self.resolved.manifest,
            config: local.config,
            tmp_dir: self.tmp_dir,
        })
    }

    /// Handles a single whiteout marker from `layer`.
    ///
    /// A plain `.wh.<name>` marker removes `<name>` from the VFS; the special
    /// opaque marker `.wh..wh..opq` removes the containing directory's
    /// children but re-attaches the (now childless) directory node itself.
    /// Backing disk files of removed nodes are deleted. A missing target only
    /// logs a warning, since the path may simply not exist in lower layers.
    async fn process_whiteout_entry(
        &self,
        vfs: &mut VfsTree,
        entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
        name: &str,
        layer: &OciImageLayer,
    ) -> Result<()> {
        let path = entry.path()?;
        let mut path = path.to_path_buf();
        path.pop();
        let opaque = name == ".wh..wh..opq";
        if !opaque {
            // Strip the ".wh." prefix to get the real target name.
            let file = &name[4..];
            path.push(file);
        }
        trace!(
            "whiteout entry {:?} layer={} path={:?}",
            entry.path()?,
            &layer.digest,
            path
        );
        let result = vfs.root.remove(&path);
        if let Some((parent, mut removed)) = result {
            delete_disk_paths(&removed).await?;
            if opaque {
                removed.children.clear();
                parent.children.push(removed);
            }
        } else {
            warn!(
                "whiteout entry layer={} path={:?} did not exist",
                &layer.digest, path
            );
        }
        Ok(())
    }

    /// Unpacks a regular-file entry into `disk_dir` and records the resulting
    /// on-disk location in the VFS. Non-file entries (directories, symlinks,
    /// devices, ...) return early; only their VFS metadata is kept.
    async fn process_write_entry(
        &self,
        vfs: &mut VfsTree,
        entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
        layer: &OciImageLayer,
    ) -> Result<()> {
        if !entry.header().entry_type().is_file() {
            return Ok(());
        }
        trace!(
            "unpack entry layer={} path={:?} type={:?}",
            &layer.digest,
            entry.path()?,
            entry.header().entry_type(),
        );
        // Permissions/xattrs/mtime are deliberately not applied to the
        // scratch files — presumably the tar metadata captured in the VFS is
        // authoritative when the image is later packed (TODO confirm).
        entry.set_preserve_permissions(false);
        entry.set_unpack_xattrs(false);
        entry.set_preserve_mtime(false);
        let path = entry
            .unpack_in(&self.disk_dir)
            .await?
            .ok_or(anyhow!("unpack did not return a path"))?;
        vfs.set_disk_path(&entry.path()?, &path)?;
        Ok(())
    }
}
/// Deletes the on-disk backing files for `node` and all of its descendants.
///
/// Files that have already disappeared are logged and skipped rather than
/// treated as an error: the previous implementation warned about a missing
/// path and then still called `fs::remove_file` on it, which failed with
/// NotFound and aborted the whole cleanup.
async fn delete_disk_paths(node: &VfsNode) -> Result<()> {
    use std::collections::VecDeque;
    // Breadth-first traversal; VecDeque avoids the O(n^2) cost of the old
    // `Vec::remove(0)` queue.
    let mut queue: VecDeque<&VfsNode> = VecDeque::new();
    queue.push_back(node);
    while let Some(node) = queue.pop_front() {
        if let Some(ref disk_path) = node.disk_path {
            if disk_path.exists() {
                fs::remove_file(disk_path).await?;
            } else {
                // Tolerate already-missing files; deletion is best-effort.
                warn!("disk path {:?} does not exist", disk_path);
            }
        }
        queue.extend(node.children.iter());
    }
    Ok(())
}

View File

@ -1,388 +0,0 @@
use crate::cache::ImageCache;
use crate::fetch::{OciImageDownloader, OciImageLayer};
use crate::name::ImageName;
use crate::packer::OciPackerFormat;
use crate::progress::{OciProgress, OciProgressContext, OciProgressPhase};
use crate::registry::OciRegistryPlatform;
use anyhow::{anyhow, Result};
use indexmap::IndexMap;
use log::{debug, trace};
use oci_spec::image::{ImageConfiguration, ImageManifest};
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use tokio::fs;
use tokio::io::AsyncRead;
use tokio_stream::StreamExt;
use tokio_tar::{Archive, Entry};
use uuid::Uuid;
/// Version stamp mixed into the cache key so that stale cache entries are
/// not reused when the packed-image layout changes.
pub const IMAGE_PACKER_VERSION: u64 = 2;
/// A packed image artifact on disk plus its OCI manifest and configuration.
pub struct ImageInfo {
    /// Path to the packed filesystem image file.
    pub image: PathBuf,
    pub manifest: ImageManifest,
    pub config: ImageConfiguration,
}
impl ImageInfo {
    /// Bundles a packed image path with its OCI manifest and configuration.
    ///
    /// This constructor cannot currently fail; the `Result` return type is
    /// kept for interface stability with existing callers.
    pub fn new(
        image: PathBuf,
        manifest: ImageManifest,
        config: ImageConfiguration,
    ) -> Result<ImageInfo> {
        let info = ImageInfo {
            image,
            manifest,
            config,
        };
        Ok(info)
    }
}
/// Downloads, extracts, and packs an OCI image into a packed filesystem
/// artifact, caching results by manifest digest.
pub struct OciImageCompiler<'a> {
    cache: &'a ImageCache,
    /// Presumably a local archive used to satisfy blobs before contacting a
    /// registry (see the downloader's seed handling) — confirm.
    seed: Option<PathBuf>,
    progress: OciProgressContext,
}
impl OciImageCompiler<'_> {
    /// Creates a compiler that caches packed images in `cache` and reports
    /// progress through `progress`.
    pub fn new(
        cache: &ImageCache,
        seed: Option<PathBuf>,
        progress: OciProgressContext,
    ) -> Result<OciImageCompiler> {
        Ok(OciImageCompiler {
            cache,
            seed,
            progress,
        })
    }

    /// Compiles `image` into a packed filesystem of `format`, working inside
    /// a throwaway temp dir (`krata-compile-<uuid>`) that is removed on scope
    /// exit regardless of success. `id` labels the progress stream.
    pub async fn compile(
        &self,
        id: &str,
        image: &ImageName,
        format: OciPackerFormat,
    ) -> Result<ImageInfo> {
        debug!("compile image={image} format={:?}", format);
        let mut tmp_dir = std::env::temp_dir().clone();
        tmp_dir.push(format!("krata-compile-{}", Uuid::new_v4()));
        let mut image_dir = tmp_dir.clone();
        image_dir.push("image");
        fs::create_dir_all(&image_dir).await?;
        let mut layer_dir = tmp_dir.clone();
        layer_dir.push("layer");
        fs::create_dir_all(&layer_dir).await?;
        let mut packed_file = tmp_dir.clone();
        packed_file.push("image.packed");
        // Guard removes the entire temp tree asynchronously on scope exit.
        let _guard = scopeguard::guard(tmp_dir.to_path_buf(), |delete| {
            tokio::task::spawn(async move {
                let _ = fs::remove_dir_all(delete).await;
            });
        });
        let info = self
            .download_and_compile(id, image, &layer_dir, &image_dir, &packed_file, format)
            .await?;
        Ok(info)
    }

    /// Full pipeline: resolve the manifest, consult the cache, download and
    /// extract layers (applying whiteouts), pack the extracted tree, and
    /// store the result back into the cache.
    async fn download_and_compile(
        &self,
        id: &str,
        image: &ImageName,
        layer_dir: &Path,
        image_dir: &Path,
        packed_file: &Path,
        format: OciPackerFormat,
    ) -> Result<ImageInfo> {
        let mut progress = OciProgress {
            id: id.to_string(),
            phase: OciProgressPhase::Resolving,
            layers: IndexMap::new(),
            value: 0,
            total: 0,
        };
        self.progress.update(&progress);
        let downloader = OciImageDownloader::new(
            self.seed.clone(),
            layer_dir.to_path_buf(),
            OciRegistryPlatform::current(),
            self.progress.clone(),
        );
        let resolved = downloader.resolve(image.clone()).await?;
        // Cache key covers manifest digest, packer version, and format so any
        // change in packed layout produces a distinct cache entry.
        let cache_key = format!(
            "manifest={}:version={}:format={}\n",
            resolved.digest,
            IMAGE_PACKER_VERSION,
            format.id(),
        );
        let cache_digest = sha256::digest(cache_key);
        if let Some(cached) = self.cache.recall(&cache_digest, format).await? {
            return Ok(cached);
        }
        progress.phase = OciProgressPhase::Resolved;
        for layer in resolved.manifest.layers() {
            progress.add_layer(layer.digest());
        }
        self.progress.update(&progress);
        let local = downloader.download(resolved, &mut progress).await?;
        for layer in &local.layers {
            debug!(
                "process layer digest={} compression={:?}",
                &layer.digest, layer.compression,
            );
            progress.extracting_layer(&layer.digest, 0, 1);
            self.progress.update(&progress);
            // First pass over the archive: apply whiteouts to image_dir and
            // collect the whited-out path strings plus the entry count.
            let (whiteouts, count) = self.process_layer_whiteout(layer, image_dir).await?;
            progress.extracting_layer(&layer.digest, 0, count);
            self.progress.update(&progress);
            debug!(
                "process layer digest={} whiteouts={:?}",
                &layer.digest, whiteouts
            );
            let mut archive = layer.archive().await?;
            let mut entries = archive.entries()?;
            let mut completed = 0;
            while let Some(entry) = entries.next().await {
                let mut entry = entry?;
                let path = entry.path()?;
                let mut maybe_whiteout_path_str =
                    path.to_str().map(|x| x.to_string()).unwrap_or_default();
                // Progress bar updates are batched to every 10th entry.
                if (completed % 10) == 0 {
                    progress.extracting_layer(&layer.digest, completed, count);
                }
                completed += 1;
                self.progress.update(&progress);
                // Skip entries matching a whiteout target, both as an exact
                // path and as a directory prefix ("path/").
                if whiteouts.contains(&maybe_whiteout_path_str) {
                    continue;
                }
                maybe_whiteout_path_str.push('/');
                if whiteouts.contains(&maybe_whiteout_path_str) {
                    continue;
                }
                let Some(name) = path.file_name() else {
                    continue;
                };
                let Some(name) = name.to_str() else {
                    continue;
                };
                if name.starts_with(".wh.") {
                    // Whiteout markers were already handled in the first pass.
                    continue;
                } else {
                    self.process_write_entry(&mut entry, layer, image_dir)
                        .await?;
                }
            }
            progress.extracted_layer(&layer.digest);
            self.progress.update(&progress);
        }
        // Downloaded layer archives are no longer needed once extracted.
        for layer in &local.layers {
            if layer.path.exists() {
                fs::remove_file(&layer.path).await?;
            }
        }
        // Packing is synchronous and heavy, so it runs on the blocking pool.
        let image_dir_pack = image_dir.to_path_buf();
        let packed_file_pack = packed_file.to_path_buf();
        let progress_pack = progress.clone();
        let progress_context = self.progress.clone();
        let format_pack = format;
        progress = tokio::task::spawn_blocking(move || {
            OciImageCompiler::pack(
                format_pack,
                &image_dir_pack,
                &packed_file_pack,
                progress_pack,
                progress_context,
            )
        })
        .await??;
        let info = ImageInfo::new(
            packed_file.to_path_buf(),
            local.image.manifest,
            local.config,
        )?;
        let info = self.cache.store(&cache_digest, &info, format).await?;
        progress.phase = OciProgressPhase::Complete;
        progress.value = 0;
        progress.total = 0;
        self.progress.update(&progress);
        Ok(info)
    }

    /// First pass over a layer archive: deletes whiteout targets from
    /// `image_dir` and returns the whited-out path strings plus the total
    /// number of entries in the archive (used for progress totals).
    async fn process_layer_whiteout(
        &self,
        layer: &OciImageLayer,
        image_dir: &Path,
    ) -> Result<(Vec<String>, usize)> {
        let mut whiteouts = Vec::new();
        let mut archive = layer.archive().await?;
        let mut entries = archive.entries()?;
        let mut count = 0usize;
        while let Some(entry) = entries.next().await {
            let entry = entry?;
            count += 1;
            let path = entry.path()?;
            let Some(name) = path.file_name() else {
                continue;
            };
            let Some(name) = name.to_str() else {
                continue;
            };
            if name.starts_with(".wh.") {
                let path = self
                    .process_whiteout_entry(&entry, name, layer, image_dir)
                    .await?;
                if let Some(path) = path {
                    whiteouts.push(path);
                }
            }
        }
        Ok((whiteouts, count))
    }

    /// Applies a single whiteout marker to `image_dir`.
    ///
    /// Plain `.wh.<name>` markers delete `<name>`; the opaque marker
    /// `.wh..wh..opq` empties the containing directory (keeping the directory
    /// itself). Paths are validated against escaping `image_dir`. Returns the
    /// whited-out path string for plain markers (so the caller can skip
    /// shadowed entries) and `None` for opaque markers.
    async fn process_whiteout_entry(
        &self,
        entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
        name: &str,
        layer: &OciImageLayer,
        image_dir: &Path,
    ) -> Result<Option<String>> {
        let path = entry.path()?;
        let mut dst = self.check_safe_entry(path.clone(), image_dir)?;
        dst.pop();
        let mut path = path.to_path_buf();
        path.pop();
        let opaque = name == ".wh..wh..opq";
        if !opaque {
            // Strip the ".wh." prefix to get the real target name.
            let file = &name[4..];
            dst.push(file);
            path.push(file);
            self.check_safe_path(&dst, image_dir)?;
        }
        trace!("whiteout entry layer={} path={:?}", &layer.digest, path,);
        let whiteout = path
            .to_str()
            .ok_or(anyhow!("unable to convert path to string"))?
            .to_string();
        if opaque {
            if dst.is_dir() {
                // Remove every child, leaving the directory itself in place.
                let mut reader = fs::read_dir(dst).await?;
                while let Some(entry) = reader.next_entry().await? {
                    let path = entry.path();
                    if path.is_symlink() || path.is_file() {
                        fs::remove_file(&path).await?;
                    } else if path.is_dir() {
                        fs::remove_dir_all(&path).await?;
                    } else {
                        return Err(anyhow!("opaque whiteout entry did not exist"));
                    }
                }
            } else {
                debug!(
                    "whiteout opaque entry missing locally layer={} path={:?} local={:?}",
                    &layer.digest,
                    entry.path()?,
                    dst,
                );
            }
        } else if dst.is_file() || dst.is_symlink() {
            fs::remove_file(&dst).await?;
        } else if dst.is_dir() {
            fs::remove_dir_all(&dst).await?;
        } else {
            debug!(
                "whiteout entry missing locally layer={} path={:?} local={:?}",
                &layer.digest,
                entry.path()?,
                dst,
            );
        }
        Ok(if opaque { None } else { Some(whiteout) })
    }

    /// Unpacks a single tar entry into `image_dir`, preserving permissions,
    /// xattrs, and mtime, then restores uid/gid ownership (symlinks are not
    /// chowned).
    async fn process_write_entry(
        &self,
        entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
        layer: &OciImageLayer,
        image_dir: &Path,
    ) -> Result<()> {
        let uid = entry.header().uid()?;
        let gid = entry.header().gid()?;
        trace!(
            "unpack entry layer={} path={:?} type={:?} uid={} gid={}",
            &layer.digest,
            entry.path()?,
            entry.header().entry_type(),
            uid,
            gid,
        );
        entry.set_preserve_mtime(true);
        entry.set_preserve_permissions(true);
        entry.set_unpack_xattrs(true);
        if let Some(path) = entry.unpack_in(image_dir).await? {
            if !path.is_symlink() {
                std::os::unix::fs::chown(path, Some(uid as u32), Some(gid as u32))?;
            }
        }
        Ok(())
    }

    /// Maps a tar entry path into `image_dir`, rejecting paths that would
    /// escape it. For whiteout markers the safety check applies to the parent
    /// directory while the marker path itself is returned.
    fn check_safe_entry(&self, path: Cow<Path>, image_dir: &Path) -> Result<PathBuf> {
        let mut dst = image_dir.to_path_buf();
        dst.push(path);
        if let Some(name) = dst.file_name() {
            if let Some(name) = name.to_str() {
                if name.starts_with(".wh.") {
                    let copy = dst.clone();
                    dst.pop();
                    self.check_safe_path(&dst, image_dir)?;
                    return Ok(copy);
                }
            }
        }
        self.check_safe_path(&dst, image_dir)?;
        Ok(dst)
    }

    /// Rejects paths that resolve (after lexical cleaning) outside
    /// `image_dir`, guarding against `..` traversal in layer archives.
    fn check_safe_path(&self, dst: &Path, image_dir: &Path) -> Result<()> {
        let resolved = path_clean::clean(dst);
        if !resolved.starts_with(image_dir) {
            return Err(anyhow!("layer attempts to work outside image dir"));
        }
        Ok(())
    }

    /// Synchronous packing step: selects a backend for `format`, packs
    /// `image_dir` into `packed_file`, and deletes the extracted tree.
    /// Runs on a blocking thread (see `download_and_compile`).
    fn pack(
        format: OciPackerFormat,
        image_dir: &Path,
        packed_file: &Path,
        mut progress: OciProgress,
        progress_context: OciProgressContext,
    ) -> Result<OciProgress> {
        let backend = format.detect_best_backend();
        let backend = backend.create();
        backend.pack(&mut progress, &progress_context, image_dir, packed_file)?;
        std::fs::remove_dir_all(image_dir)?;
        progress.phase = OciProgressPhase::Packing;
        progress.value = progress.total;
        progress_context.update(&progress);
        Ok(progress)
    }
}

View File

@ -1,8 +1,8 @@
use crate::progress::{OciProgress, OciProgressContext, OciProgressPhase};
use crate::progress::{OciBoundProgress, OciProgressPhase};
use super::{
name::ImageName,
registry::{OciRegistryClient, OciRegistryPlatform},
registry::{OciPlatform, OciRegistryClient},
};
use std::{
@ -24,11 +24,10 @@ use tokio::{
use tokio_stream::StreamExt;
use tokio_tar::Archive;
pub struct OciImageDownloader {
pub struct OciImageFetcher {
seed: Option<PathBuf>,
storage: PathBuf,
platform: OciRegistryPlatform,
progress: OciProgressContext,
platform: OciPlatform,
progress: OciBoundProgress,
}
#[derive(Clone, Debug, PartialEq, Eq)]
@ -77,16 +76,14 @@ pub struct OciLocalImage {
pub layers: Vec<OciImageLayer>,
}
impl OciImageDownloader {
impl OciImageFetcher {
pub fn new(
seed: Option<PathBuf>,
storage: PathBuf,
platform: OciRegistryPlatform,
progress: OciProgressContext,
) -> OciImageDownloader {
OciImageDownloader {
platform: OciPlatform,
progress: OciBoundProgress,
) -> OciImageFetcher {
OciImageFetcher {
seed,
storage,
platform,
progress,
}
@ -216,12 +213,14 @@ impl OciImageDownloader {
pub async fn download(
&self,
image: OciResolvedImage,
progress: &mut OciProgress,
layer_dir: &Path,
) -> Result<OciLocalImage> {
let config: ImageConfiguration;
progress.phase = OciProgressPhase::ConfigAcquire;
self.progress.update(progress);
self.progress
.update(|progress| {
progress.phase = OciProgressPhase::ConfigAcquire;
})
.await;
let mut client = OciRegistryClient::new(image.name.registry_url()?, self.platform.clone())?;
if let Some(seeded) = self
.load_seed_json_blob::<ImageConfiguration>(image.manifest.config())
@ -234,18 +233,31 @@ impl OciImageDownloader {
.await?;
config = serde_json::from_slice(&config_bytes)?;
}
progress.phase = OciProgressPhase::LayerAcquire;
self.progress.update(progress);
self.progress
.update(|progress| {
progress.phase = OciProgressPhase::LayerAcquire;
for layer in image.manifest.layers() {
progress.add_layer(layer.digest(), layer.size() as usize);
}
})
.await;
let mut layers = Vec::new();
for layer in image.manifest.layers() {
progress.downloading_layer(layer.digest(), 0, layer.size() as usize);
self.progress.update(progress);
self.progress
.update(|progress| {
progress.downloading_layer(layer.digest(), 0, layer.size() as usize);
})
.await;
layers.push(
self.acquire_layer(&image.name, layer, &mut client, progress)
self.acquire_layer(&image.name, layer, layer_dir, &mut client)
.await?,
);
progress.downloaded_layer(layer.digest());
self.progress.update(progress);
self.progress
.update(|progress| {
progress.downloaded_layer(layer.digest());
})
.await;
}
Ok(OciLocalImage {
image,
@ -258,28 +270,22 @@ impl OciImageDownloader {
&self,
image: &ImageName,
layer: &Descriptor,
layer_dir: &Path,
client: &mut OciRegistryClient,
progress: &mut OciProgress,
) -> Result<OciImageLayer> {
debug!(
"acquire layer digest={} size={}",
layer.digest(),
layer.size()
);
let mut layer_path = self.storage.clone();
let mut layer_path = layer_dir.to_path_buf();
layer_path.push(format!("{}.layer", layer.digest()));
let seeded = self.extract_seed_blob(layer, &layer_path).await?;
if !seeded {
let file = File::create(&layer_path).await?;
let size = client
.write_blob_to_file(
&image.name,
layer,
file,
Some(progress),
Some(&self.progress),
)
.write_blob_to_file(&image.name, layer, file, Some(self.progress.clone()))
.await?;
if layer.size() as u64 != size {
return Err(anyhow!(

View File

@ -1,7 +1,7 @@
pub mod cache;
pub mod compiler;
pub mod assemble;
pub mod fetch;
pub mod name;
pub mod packer;
pub mod progress;
pub mod registry;
pub mod vfs;

View File

@ -1,307 +0,0 @@
use std::{
fs::File,
io::{BufWriter, ErrorKind, Read},
os::unix::fs::{FileTypeExt, MetadataExt, PermissionsExt},
path::{Path, PathBuf},
process::{Command, Stdio},
};
use anyhow::{anyhow, Result};
use backhand::{compression::Compressor, FilesystemCompressor, FilesystemWriter, NodeHeader};
use log::{trace, warn};
use walkdir::WalkDir;
use crate::progress::{OciProgress, OciProgressContext, OciProgressPhase};
/// Filesystem formats a packed OCI image can be written as.
#[derive(Debug, Default, Clone, Copy)]
pub enum OciPackerFormat {
    /// SquashFS (the default format).
    #[default]
    Squashfs,
    /// EROFS.
    Erofs,
}
/// Concrete packer implementations that can produce a packed image.
#[derive(Debug, Clone, Copy)]
pub enum OciPackerBackendType {
    /// Pure-Rust squashfs writer (backhand crate).
    Backhand,
    /// External `mksquashfs` tool.
    MkSquashfs,
    /// External `mkfs.erofs` tool.
    MkfsErofs,
}
impl OciPackerFormat {
    /// Stable numeric identifier for this format, mixed into cache keys.
    pub fn id(&self) -> u8 {
        match self {
            OciPackerFormat::Squashfs => 0,
            OciPackerFormat::Erofs => 1,
        }
    }

    /// File extension used when storing a packed image of this format.
    pub fn extension(&self) -> &str {
        match self {
            // Bug fix: this previously returned "erofs" for squashfs images,
            // so cached squashfs artifacts carried the wrong suffix.
            OciPackerFormat::Squashfs => "squashfs",
            OciPackerFormat::Erofs => "erofs",
        }
    }

    /// Chooses the best available backend for this format, preferring the
    /// external `mksquashfs` tool when installed and falling back to the
    /// pure-Rust backhand implementation.
    pub fn detect_best_backend(&self) -> OciPackerBackendType {
        match self {
            OciPackerFormat::Squashfs => {
                // Probe for mksquashfs by running `mksquashfs -version`
                // with all standard streams detached.
                let status = Command::new("mksquashfs")
                    .arg("-version")
                    .stdin(Stdio::null())
                    .stderr(Stdio::null())
                    .stdout(Stdio::null())
                    .status()
                    .ok();
                let Some(code) = status.and_then(|x| x.code()) else {
                    return OciPackerBackendType::Backhand;
                };
                if code == 0 {
                    OciPackerBackendType::MkSquashfs
                } else {
                    OciPackerBackendType::Backhand
                }
            }
            OciPackerFormat::Erofs => OciPackerBackendType::MkfsErofs,
        }
    }
}
impl OciPackerBackendType {
    /// The packed image format that this backend produces.
    pub fn format(&self) -> OciPackerFormat {
        match self {
            OciPackerBackendType::Backhand | OciPackerBackendType::MkSquashfs => {
                OciPackerFormat::Squashfs
            }
            OciPackerBackendType::MkfsErofs => OciPackerFormat::Erofs,
        }
    }

    /// Instantiates the boxed packer implementation for this backend.
    pub fn create(&self) -> Box<dyn OciPackerBackend> {
        match self {
            OciPackerBackendType::Backhand => Box::new(OciPackerBackhand {}),
            OciPackerBackendType::MkSquashfs => Box::new(OciPackerMkSquashfs {}),
            OciPackerBackendType::MkfsErofs => Box::new(OciPackerMkfsErofs {}),
        }
    }
}
/// A strategy for packing an extracted image directory into a single
/// filesystem image file, reporting progress along the way.
pub trait OciPackerBackend {
    /// Packs `directory` into `file`, updating `progress` via
    /// `progress_context` as work proceeds.
    fn pack(
        &self,
        progress: &mut OciProgress,
        progress_context: &OciProgressContext,
        directory: &Path,
        file: &Path,
    ) -> Result<()>;
}
/// Packer backed by the pure-Rust `backhand` squashfs writer.
pub struct OciPackerBackhand {}
impl OciPackerBackend for OciPackerBackhand {
    /// Walks `directory` and writes every entry into an in-memory squashfs
    /// description, preserving uid/gid/mode/mtime, then writes the
    /// gzip-compressed filesystem to `file`.
    fn pack(
        &self,
        progress: &mut OciProgress,
        progress_context: &OciProgressContext,
        directory: &Path,
        file: &Path,
    ) -> Result<()> {
        progress.phase = OciProgressPhase::Packing;
        progress.total = 1;
        progress.value = 0;
        progress_context.update(progress);
        let mut writer = FilesystemWriter::default();
        writer.set_compressor(FilesystemCompressor::new(Compressor::Gzip, None)?);
        let walk = WalkDir::new(directory).follow_links(false);
        for entry in walk {
            let entry = entry?;
            // Squashfs paths are rooted at "/", relative to the source dir.
            let rel = entry
                .path()
                .strip_prefix(directory)?
                .to_str()
                .ok_or_else(|| anyhow!("failed to strip prefix of tmpdir"))?;
            let rel = format!("/{}", rel);
            trace!("squash write {}", rel);
            let typ = entry.file_type();
            // symlink_metadata: do not follow links when reading ownership/mode.
            let metadata = std::fs::symlink_metadata(entry.path())?;
            let uid = metadata.uid();
            let gid = metadata.gid();
            let mode = metadata.permissions().mode();
            let mtime = metadata.mtime();
            if rel == "/" {
                // The root directory's metadata is set on the writer itself.
                writer.set_root_uid(uid);
                writer.set_root_gid(gid);
                writer.set_root_mode(mode as u16);
                continue;
            }
            let header = NodeHeader {
                permissions: mode as u16,
                uid,
                gid,
                mtime: mtime as u32,
            };
            if typ.is_symlink() {
                let symlink = std::fs::read_link(entry.path())?;
                let symlink = symlink
                    .to_str()
                    .ok_or_else(|| anyhow!("failed to read symlink"))?;
                writer.push_symlink(symlink, rel, header)?;
            } else if typ.is_dir() {
                writer.push_dir(rel, header)?;
            } else if typ.is_file() {
                // ConsumingFileReader deletes each source file after it is
                // consumed, keeping peak disk usage down during packing.
                writer.push_file(ConsumingFileReader::new(entry.path()), rel, header)?;
            } else if typ.is_block_device() {
                let device = metadata.dev();
                writer.push_block_device(device as u32, rel, header)?;
            } else if typ.is_char_device() {
                let device = metadata.dev();
                writer.push_char_device(device as u32, rel, header)?;
            } else if typ.is_fifo() {
                writer.push_fifo(rel, header)?;
            } else if typ.is_socket() {
                writer.push_socket(rel, header)?;
            } else {
                return Err(anyhow!("invalid file type"));
            }
        }
        progress.phase = OciProgressPhase::Packing;
        progress.value = 1;
        progress_context.update(progress);
        let squash_file_path = file
            .to_str()
            .ok_or_else(|| anyhow!("failed to convert squashfs string"))?;
        let file = File::create(file)?;
        let mut bufwrite = BufWriter::new(file);
        trace!("squash generate: {}", squash_file_path);
        writer.write(&mut bufwrite)?;
        Ok(())
    }
}
/// A reader that lazily opens its backing file on first read and deletes the
/// file from disk when dropped, so packing inputs are consumed exactly once
/// without a separate cleanup pass.
struct ConsumingFileReader {
    path: PathBuf,
    file: Option<File>,
}

impl ConsumingFileReader {
    /// Creates a reader for `path`; the file is not opened until first read.
    fn new(path: &Path) -> ConsumingFileReader {
        ConsumingFileReader {
            path: path.to_path_buf(),
            file: None,
        }
    }
}

impl Read for ConsumingFileReader {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        // Open the backing file lazily on the first call.
        if self.file.is_none() {
            self.file = Some(File::open(&self.path)?);
        }
        match self.file.as_mut() {
            Some(file) => file.read(buf),
            None => Err(std::io::Error::new(
                ErrorKind::NotFound,
                "file was not opened",
            )),
        }
    }
}

impl Drop for ConsumingFileReader {
    /// Closes the handle (if any) and removes the file from disk.
    fn drop(&mut self) {
        drop(self.file.take());
        if let Err(error) = std::fs::remove_file(&self.path) {
            warn!("failed to delete consuming file {:?}: {}", self.path, error);
        }
    }
}
/// Packer that shells out to the external `mksquashfs` tool.
pub struct OciPackerMkSquashfs {}
impl OciPackerBackend for OciPackerMkSquashfs {
    /// Runs `mksquashfs <directory> <file> -comp gzip` with detached stdio
    /// and fails if the tool exits unsuccessfully.
    fn pack(
        &self,
        progress: &mut OciProgress,
        progress_context: &OciProgressContext,
        directory: &Path,
        file: &Path,
    ) -> Result<()> {
        progress.phase = OciProgressPhase::Packing;
        progress.total = 1;
        progress.value = 0;
        progress_context.update(progress);
        let mut child = Command::new("mksquashfs")
            .arg(directory)
            .arg(file)
            .arg("-comp")
            .arg("gzip")
            .stdin(Stdio::null())
            .stderr(Stdio::null())
            .stdout(Stdio::null())
            .spawn()?;
        let status = child.wait()?;
        if !status.success() {
            // NOTE(review): code() is None when the process is killed by a
            // signal; this unwrap would panic in that case.
            Err(anyhow!(
                "mksquashfs failed with exit code: {}",
                status.code().unwrap()
            ))
        } else {
            progress.phase = OciProgressPhase::Packing;
            progress.total = 1;
            progress.value = 1;
            progress_context.update(progress);
            Ok(())
        }
    }
}
/// Packer that shells out to the external `mkfs.erofs` tool.
pub struct OciPackerMkfsErofs {}
impl OciPackerBackend for OciPackerMkfsErofs {
    /// Runs `mkfs.erofs -L root <file> <directory>` with detached stdio and
    /// fails if the tool exits unsuccessfully.
    fn pack(
        &self,
        progress: &mut OciProgress,
        progress_context: &OciProgressContext,
        directory: &Path,
        file: &Path,
    ) -> Result<()> {
        progress.phase = OciProgressPhase::Packing;
        progress.total = 1;
        progress.value = 0;
        progress_context.update(progress);
        let mut child = Command::new("mkfs.erofs")
            .arg("-L")
            .arg("root")
            .arg(file)
            .arg(directory)
            .stdin(Stdio::null())
            .stderr(Stdio::null())
            .stdout(Stdio::null())
            .spawn()?;
        let status = child.wait()?;
        if !status.success() {
            // NOTE(review): code() is None when the process is killed by a
            // signal; this unwrap would panic in that case.
            Err(anyhow!(
                "mkfs.erofs failed with exit code: {}",
                status.code().unwrap()
            ))
        } else {
            progress.phase = OciProgressPhase::Packing;
            progress.total = 1;
            progress.value = 1;
            progress_context.update(progress);
            Ok(())
        }
    }
}

View File

@ -0,0 +1,201 @@
use std::{path::Path, process::Stdio, sync::Arc};
use super::OciPackedFormat;
use crate::{
progress::{OciBoundProgress, OciProgressPhase},
vfs::VfsTree,
};
use anyhow::{anyhow, Result};
use log::warn;
use tokio::{pin, process::Command, select};
/// External tools that can pack a VFS tree into a filesystem image.
#[derive(Debug, Clone, Copy)]
pub enum OciPackerBackendType {
    /// External `mksquashfs` tool.
    MkSquashfs,
    /// External `mkfs.erofs` tool.
    MkfsErofs,
}
impl OciPackerBackendType {
    /// The packed image format that this backend emits.
    pub fn format(&self) -> OciPackedFormat {
        match self {
            Self::MkSquashfs => OciPackedFormat::Squashfs,
            Self::MkfsErofs => OciPackedFormat::Erofs,
        }
    }

    /// Instantiates the boxed packer implementation for this backend.
    pub fn create(&self) -> Box<dyn OciPackerBackend> {
        match self {
            Self::MkSquashfs => Box::new(OciPackerMkSquashfs {}),
            Self::MkfsErofs => Box::new(OciPackerMkfsErofs {}),
        }
    }
}
/// A strategy for streaming a `VfsTree` into a packed filesystem image at
/// `file`, reporting progress through `progress`.
#[async_trait::async_trait]
pub trait OciPackerBackend: Send + Sync {
    async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()>;
}
/// Packer that streams the VFS as a tar into the external `mksquashfs` tool
/// (`-tar` mode), avoiding an intermediate extracted directory.
pub struct OciPackerMkSquashfs {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerMkSquashfs {
    async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
        progress
            .update(|progress| {
                progress.phase = OciProgressPhase::Packing;
                progress.total = 1;
                progress.value = 0;
            })
            .await;
        // mksquashfs reads the tar stream from stdin ("-" source + -tar).
        let mut child = Command::new("mksquashfs")
            .arg("-")
            .arg(file)
            .arg("-comp")
            .arg("gzip")
            .arg("-tar")
            .stdin(Stdio::piped())
            .stderr(Stdio::null())
            .stdout(Stdio::null())
            .spawn()?;
        let stdin = child
            .stdin
            .take()
            .ok_or(anyhow!("unable to acquire stdin stream"))?;
        // Writer task feeds the VFS tar stream into the child's stdin.
        let mut writer = Some(tokio::task::spawn(async move {
            if let Err(error) = vfs.write_to_tar(stdin).await {
                warn!("failed to write tar: {}", error);
                return Err(error);
            }
            Ok(())
        }));
        let wait = child.wait();
        pin!(wait);
        // Drive the writer task and the child process concurrently until the
        // child exits, without dropping either side early.
        let status_result = loop {
            if let Some(inner) = writer.as_mut() {
                select! {
                    x = inner => {
                        writer = None;
                        match x {
                            // NOTE(review): `x` is the JoinHandle result, so
                            // Ok(_) also matches Ok(Err(_)) and discards the
                            // writer's own error (it was only logged above);
                            // presumably the child's exit status is relied on
                            // to surface tar failures — confirm.
                            Ok(_) => {},
                            Err(error) => {
                                return Err(error.into());
                            }
                        }
                    },
                    status = &mut wait => {
                        break status;
                    }
                };
            } else {
                select! {
                    status = &mut wait => {
                        break status;
                    }
                };
            }
        };
        // If the child exited first, await the writer and propagate errors.
        if let Some(writer) = writer {
            writer.await??;
        }
        let status = status_result?;
        if !status.success() {
            Err(anyhow!(
                "mksquashfs failed with exit code: {}",
                status.code().unwrap()
            ))
        } else {
            progress
                .update(|progress| {
                    progress.phase = OciProgressPhase::Packing;
                    progress.total = 1;
                    progress.value = 1;
                })
                .await;
            Ok(())
        }
    }
}
/// Packer that streams the VFS as a tar into the external `mkfs.erofs` tool
/// (`--tar=-` mode), avoiding an intermediate extracted directory.
pub struct OciPackerMkfsErofs {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerMkfsErofs {
    async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, path: &Path) -> Result<()> {
        progress
            .update(|progress| {
                progress.phase = OciProgressPhase::Packing;
                progress.total = 1;
                progress.value = 0;
            })
            .await;
        // mkfs.erofs reads the tar stream from stdin via --tar=-.
        let mut child = Command::new("mkfs.erofs")
            .arg("-L")
            .arg("root")
            .arg("--tar=-")
            .arg(path)
            .stdin(Stdio::piped())
            .stderr(Stdio::null())
            .stdout(Stdio::null())
            .spawn()?;
        let stdin = child
            .stdin
            .take()
            .ok_or(anyhow!("unable to acquire stdin stream"))?;
        // Writer task feeds the VFS tar stream into the child's stdin.
        let mut writer = Some(tokio::task::spawn(
            async move { vfs.write_to_tar(stdin).await },
        ));
        let wait = child.wait();
        pin!(wait);
        // Drive the writer task and the child process concurrently until the
        // child exits, without dropping either side early.
        let status_result = loop {
            if let Some(inner) = writer.as_mut() {
                select! {
                    x = inner => {
                        match x {
                            // NOTE(review): `x` is the JoinHandle result, so
                            // Ok(_) also matches Ok(Err(_)), discarding the
                            // writer's own error; presumably the child's exit
                            // status is relied on to surface tar failures —
                            // confirm.
                            Ok(_) => {
                                writer = None;
                            },
                            Err(error) => {
                                return Err(error.into());
                            }
                        }
                    },
                    status = &mut wait => {
                        break status;
                    }
                };
            } else {
                select! {
                    status = &mut wait => {
                        break status;
                    }
                };
            }
        };
        // If the child exited first, await the writer and propagate errors.
        if let Some(writer) = writer {
            writer.await??;
        }
        let status = status_result?;
        if !status.success() {
            Err(anyhow!(
                "mkfs.erofs failed with exit code: {}",
                status.code().unwrap()
            ))
        } else {
            progress
                .update(|progress| {
                    progress.phase = OciProgressPhase::Packing;
                    progress.total = 1;
                    progress.value = 1;
                })
                .await;
            Ok(())
        }
    }
}

View File

@ -1,6 +1,5 @@
use crate::packer::OciPackerFormat;
use crate::packer::{OciImagePacked, OciPackedFormat};
use super::compiler::ImageInfo;
use anyhow::Result;
use log::debug;
use oci_spec::image::{ImageConfiguration, ImageManifest};
@ -8,18 +7,22 @@ use std::path::{Path, PathBuf};
use tokio::fs;
#[derive(Clone)]
pub struct ImageCache {
pub struct OciPackerCache {
cache_dir: PathBuf,
}
impl ImageCache {
pub fn new(cache_dir: &Path) -> Result<ImageCache> {
Ok(ImageCache {
impl OciPackerCache {
pub fn new(cache_dir: &Path) -> Result<OciPackerCache> {
Ok(OciPackerCache {
cache_dir: cache_dir.to_path_buf(),
})
}
pub async fn recall(&self, digest: &str, format: OciPackerFormat) -> Result<Option<ImageInfo>> {
pub async fn recall(
&self,
digest: &str,
format: OciPackedFormat,
) -> Result<Option<OciImagePacked>> {
let mut fs_path = self.cache_dir.clone();
let mut config_path = self.cache_dir.clone();
let mut manifest_path = self.cache_dir.clone();
@ -40,7 +43,13 @@ impl ImageCache {
let config_text = fs::read_to_string(&config_path).await?;
let config: ImageConfiguration = serde_json::from_str(&config_text)?;
debug!("cache hit digest={}", digest);
Some(ImageInfo::new(fs_path.clone(), manifest, config)?)
Some(OciImagePacked::new(
digest.to_string(),
fs_path.clone(),
format,
config,
manifest,
))
} else {
None
}
@ -51,24 +60,25 @@ impl ImageCache {
)
}
pub async fn store(
&self,
digest: &str,
info: &ImageInfo,
format: OciPackerFormat,
) -> Result<ImageInfo> {
debug!("cache store digest={}", digest);
pub async fn store(&self, packed: OciImagePacked) -> Result<OciImagePacked> {
debug!("cache store digest={}", packed.digest);
let mut fs_path = self.cache_dir.clone();
let mut manifest_path = self.cache_dir.clone();
let mut config_path = self.cache_dir.clone();
fs_path.push(format!("{}.{}", digest, format.extension()));
manifest_path.push(format!("{}.manifest.json", digest));
config_path.push(format!("{}.config.json", digest));
fs::copy(&info.image, &fs_path).await?;
let manifest_text = serde_json::to_string_pretty(&info.manifest)?;
fs_path.push(format!("{}.{}", packed.digest, packed.format.extension()));
manifest_path.push(format!("{}.manifest.json", packed.digest));
config_path.push(format!("{}.config.json", packed.digest));
fs::copy(&packed.path, &fs_path).await?;
let manifest_text = serde_json::to_string_pretty(&packed.manifest)?;
fs::write(&manifest_path, manifest_text).await?;
let config_text = serde_json::to_string_pretty(&info.config)?;
let config_text = serde_json::to_string_pretty(&packed.config)?;
fs::write(&config_path, config_text).await?;
ImageInfo::new(fs_path.clone(), info.manifest.clone(), info.config.clone())
Ok(OciImagePacked::new(
packed.digest,
fs_path.clone(),
packed.format,
packed.config,
packed.manifest,
))
}
}

View File

@ -0,0 +1,58 @@
use std::path::PathBuf;
use self::backend::OciPackerBackendType;
use oci_spec::image::{ImageConfiguration, ImageManifest};
pub mod backend;
pub mod cache;
pub mod service;
/// The filesystem image formats an OCI image can be packed into.
#[derive(Debug, Default, Clone, Copy)]
pub enum OciPackedFormat {
    /// Packed with the `mksquashfs` backend (the default format).
    #[default]
    Squashfs,
    /// Packed with the `mkfs.erofs` backend.
    Erofs,
}
impl OciPackedFormat {
    /// File extension used for cached images stored in this format.
    pub fn extension(&self) -> &str {
        match *self {
            Self::Squashfs => "squashfs",
            Self::Erofs => "erofs",
        }
    }

    /// The packer backend type responsible for producing this format.
    pub fn backend(&self) -> OciPackerBackendType {
        match *self {
            Self::Squashfs => OciPackerBackendType::MkSquashfs,
            Self::Erofs => OciPackerBackendType::MkfsErofs,
        }
    }
}
/// An OCI image that has been packed into a single filesystem image file.
#[derive(Clone)]
pub struct OciImagePacked {
    /// Digest of the source image this pack was produced from.
    pub digest: String,
    /// Location of the packed filesystem image on disk.
    pub path: PathBuf,
    /// Filesystem format the image was packed into.
    pub format: OciPackedFormat,
    /// OCI image configuration carried over from the source image.
    pub config: ImageConfiguration,
    /// OCI image manifest carried over from the source image.
    pub manifest: ImageManifest,
}
impl OciImagePacked {
pub fn new(
digest: String,
path: PathBuf,
format: OciPackedFormat,
config: ImageConfiguration,
manifest: ImageManifest,
) -> OciImagePacked {
OciImagePacked {
digest,
path,
format,
config,
manifest,
}
}
}

View File

@ -0,0 +1,81 @@
use std::path::{Path, PathBuf};
use anyhow::{anyhow, Result};
use crate::{
assemble::OciImageAssembler,
fetch::OciImageFetcher,
name::ImageName,
progress::{OciBoundProgress, OciProgress, OciProgressContext},
registry::OciPlatform,
};
use super::{cache::OciPackerCache, OciImagePacked, OciPackedFormat};
/// Turns OCI image references into packed filesystem images, caching
/// results on disk keyed by digest and format.
#[derive(Clone)]
pub struct OciPackerService {
    // Handed to OciImageFetcher on each request; presumably a local seed
    // archive consulted before hitting the registry — TODO confirm.
    seed: Option<PathBuf>,
    // Platform (OS + architecture) that images are resolved for.
    platform: OciPlatform,
    // On-disk cache of previously packed images.
    cache: OciPackerCache,
}
impl OciPackerService {
    /// Create a packer service whose cache lives under `cache_dir`.
    ///
    /// `seed` is forwarded to the image fetcher on each request;
    /// `platform` selects which OS/architecture images are resolved for.
    pub fn new(
        seed: Option<PathBuf>,
        cache_dir: &Path,
        platform: OciPlatform,
    ) -> Result<OciPackerService> {
        let cache = OciPackerCache::new(cache_dir)?;
        Ok(OciPackerService {
            seed,
            cache,
            platform,
        })
    }

    /// Look up a previously packed image by digest and format.
    pub async fn recall(
        &self,
        digest: &str,
        format: OciPackedFormat,
    ) -> Result<Option<OciImagePacked>> {
        self.cache.recall(digest, format).await
    }

    /// Resolve `name`, then pack it into `format`, reporting progress
    /// through `progress_context`. If the resolved digest was already
    /// packed in this format, the cached result is returned instead.
    pub async fn request(
        &self,
        name: ImageName,
        format: OciPackedFormat,
        progress_context: OciProgressContext,
    ) -> Result<OciImagePacked> {
        let progress = OciBoundProgress::new(progress_context.clone(), OciProgress::new());
        let fetcher =
            OciImageFetcher::new(self.seed.clone(), self.platform.clone(), progress.clone());
        let resolved = fetcher.resolve(name).await?;

        // Cache check happens after resolution so the key is the digest,
        // not the mutable image name/tag.
        if let Some(hit) = self.cache.recall(&resolved.digest, format).await? {
            return Ok(hit);
        }

        let assembler =
            OciImageAssembler::new(fetcher, resolved, progress.clone(), None, None).await?;
        let assembled = assembler.assemble().await?;
        let mut pack_path = assembled
            .tmp_dir
            .clone()
            .ok_or(anyhow!("tmp_dir was missing when packing image"))?;
        pack_path.push("image.pack");

        let backend = format.backend().create();
        backend
            .pack(progress, assembled.vfs.clone(), &pack_path)
            .await?;

        // Store copies the pack into the cache directory and returns a
        // handle pointing at the cached copy.
        let packed = OciImagePacked::new(
            assembled.digest.clone(),
            pack_path,
            format,
            assembled.config.clone(),
            assembled.manifest.clone(),
        );
        self.cache.store(packed).await
    }
}

View File

@ -1,24 +1,40 @@
use std::sync::Arc;
use indexmap::IndexMap;
use tokio::sync::broadcast::Sender;
use tokio::sync::{mpsc::Sender, Mutex};
#[derive(Clone, Debug)]
pub struct OciProgress {
pub id: String,
pub phase: OciProgressPhase,
pub layers: IndexMap<String, OciProgressLayer>,
pub value: u64,
pub total: u64,
}
impl Default for OciProgress {
fn default() -> Self {
Self::new()
}
}
impl OciProgress {
pub fn add_layer(&mut self, id: &str) {
pub fn new() -> Self {
OciProgress {
phase: OciProgressPhase::Resolving,
layers: IndexMap::new(),
value: 0,
total: 1,
}
}
pub fn add_layer(&mut self, id: &str, size: usize) {
self.layers.insert(
id.to_string(),
OciProgressLayer {
id: id.to_string(),
phase: OciProgressLayerPhase::Waiting,
value: 0,
total: 0,
total: size as u64,
},
);
}
@ -92,6 +108,33 @@ impl OciProgressContext {
}
pub fn update(&self, progress: &OciProgress) {
let _ = self.sender.send(progress.clone());
let _ = self.sender.try_send(progress.clone());
}
}
/// Progress state shared behind a lock and bound to a context that
/// publishes a snapshot after every mutation.
#[derive(Clone)]
pub struct OciBoundProgress {
    // Used to broadcast a clone of the progress after each update.
    context: OciProgressContext,
    // Shared mutable state; cloned handles observe the same instance.
    instance: Arc<Mutex<OciProgress>>,
}
impl OciBoundProgress {
    /// Bind `progress` to `context` so that every mutation made through
    /// this handle is broadcast to observers.
    pub fn new(context: OciProgressContext, progress: OciProgress) -> OciBoundProgress {
        OciBoundProgress {
            context,
            instance: Arc::new(Mutex::new(progress)),
        }
    }

    /// Mutate the progress under the async lock, then publish the result.
    pub async fn update(&self, function: impl FnOnce(&mut OciProgress)) {
        let mut guard = self.instance.lock().await;
        function(&mut guard);
        self.context.update(&guard);
    }

    /// Like [`Self::update`], but callable from synchronous code; blocks
    /// the current thread while acquiring the lock.
    pub fn update_blocking(&self, function: impl FnOnce(&mut OciProgress)) {
        let mut guard = self.instance.blocking_lock();
        function(&mut guard);
        self.context.update(&guard);
    }
}

View File

@ -7,28 +7,28 @@ use reqwest::{Client, RequestBuilder, Response, StatusCode};
use tokio::{fs::File, io::AsyncWriteExt};
use url::Url;
use crate::progress::{OciProgress, OciProgressContext};
use crate::progress::OciBoundProgress;
#[derive(Clone, Debug)]
pub struct OciRegistryPlatform {
pub struct OciPlatform {
pub os: Os,
pub arch: Arch,
}
impl OciRegistryPlatform {
impl OciPlatform {
#[cfg(target_arch = "x86_64")]
const CURRENT_ARCH: Arch = Arch::Amd64;
#[cfg(target_arch = "aarch64")]
const CURRENT_ARCH: Arch = Arch::ARM64;
pub fn new(os: Os, arch: Arch) -> OciRegistryPlatform {
OciRegistryPlatform { os, arch }
pub fn new(os: Os, arch: Arch) -> OciPlatform {
OciPlatform { os, arch }
}
pub fn current() -> OciRegistryPlatform {
OciRegistryPlatform {
pub fn current() -> OciPlatform {
OciPlatform {
os: Os::Linux,
arch: OciRegistryPlatform::CURRENT_ARCH,
arch: OciPlatform::CURRENT_ARCH,
}
}
}
@ -36,12 +36,12 @@ impl OciRegistryPlatform {
pub struct OciRegistryClient {
agent: Client,
url: Url,
platform: OciRegistryPlatform,
platform: OciPlatform,
token: Option<String>,
}
impl OciRegistryClient {
pub fn new(url: Url, platform: OciRegistryPlatform) -> Result<OciRegistryClient> {
pub fn new(url: Url, platform: OciPlatform) -> Result<OciRegistryClient> {
Ok(OciRegistryClient {
agent: Client::new(),
url,
@ -140,8 +140,7 @@ impl OciRegistryClient {
name: N,
descriptor: &Descriptor,
mut dest: File,
mut progress_handle: Option<&mut OciProgress>,
progress_context: Option<&OciProgressContext>,
progress: Option<OciBoundProgress>,
) -> Result<u64> {
let url = self.url.join(&format!(
"/v2/{}/blobs/{}",
@ -157,15 +156,16 @@ impl OciRegistryClient {
if (size - last_progress_size) > (5 * 1024 * 1024) {
last_progress_size = size;
if let Some(progress_handle) = progress_handle.as_mut() {
progress_handle.downloading_layer(
descriptor.digest(),
size as usize,
descriptor.size() as usize,
);
if let Some(progress_context) = progress_context.as_ref() {
progress_context.update(progress_handle);
}
if let Some(ref progress) = progress {
progress
.update(|progress| {
progress.downloading_layer(
descriptor.digest(),
size as usize,
descriptor.size() as usize,
);
})
.await;
}
}
}

261
crates/oci/src/vfs.rs Normal file
View File

@ -0,0 +1,261 @@
use std::{
    collections::VecDeque,
    path::{Path, PathBuf},
};

use anyhow::{anyhow, Result};
use tokio::{
    fs::File,
    io::{AsyncRead, AsyncWrite, AsyncWriteExt},
};
use tokio_tar::{Builder, Entry, EntryType, Header};
/// The kind of filesystem object a `VfsNode` represents, mirroring the
/// entry types found in tar archives.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum VfsNodeType {
    Directory,
    RegularFile,
    Symlink,
    Hardlink,
    Fifo,
    CharDevice,
    BlockDevice,
}
/// A single entry in the in-memory virtual filesystem, carrying the
/// metadata needed to reproduce a tar entry (ownership, mode, timestamps,
/// device numbers) plus an optional pointer to its content on disk.
#[derive(Clone, Debug)]
pub struct VfsNode {
    /// File name of this entry: a single path component, not a full path.
    pub name: String,
    /// Size in bytes as reported by the tar header.
    pub size: u64,
    /// Child nodes; populated only for directories.
    pub children: Vec<VfsNode>,
    /// What kind of filesystem object this node is.
    pub typ: VfsNodeType,
    /// Owner user id.
    pub uid: u64,
    /// Owner group id.
    pub gid: u64,
    /// Target of a symlink or hardlink, when applicable.
    pub link_name: Option<String>,
    /// Unix permission bits.
    pub mode: u32,
    /// Modification time from the tar header (seconds since the epoch).
    pub mtime: u64,
    /// Device major number for char/block device nodes.
    pub dev_major: Option<u32>,
    /// Device minor number for char/block device nodes.
    pub dev_minor: Option<u32>,
    /// Where this entry's content lives on disk; `None` for nodes written
    /// without content (directories, links, devices).
    pub disk_path: Option<PathBuf>,
}
impl VfsNode {
    /// Build a node from a tar entry's header, capturing its name, type,
    /// ownership, permissions, mtime, and device numbers.
    ///
    /// `children` starts empty and `disk_path` unset; both are filled in
    /// later while the tree is assembled.
    ///
    /// Errors when the entry has no file name or its entry type does not
    /// map onto a [`VfsNodeType`].
    pub fn from<X: AsyncRead + Unpin>(entry: &Entry<X>) -> Result<VfsNode> {
        let header = entry.header();
        // Only the final path component is stored; the node's position in
        // the tree encodes the rest of the path.
        let name = entry
            .path()?
            .file_name()
            .ok_or(anyhow!("unable to get file name for entry"))?
            .to_string_lossy()
            .to_string();
        let typ = header.entry_type();
        let vtype = if typ.is_symlink() {
            VfsNodeType::Symlink
        } else if typ.is_hard_link() {
            VfsNodeType::Hardlink
        } else if typ.is_dir() {
            VfsNodeType::Directory
        } else if typ.is_fifo() {
            VfsNodeType::Fifo
        } else if typ.is_block_special() {
            VfsNodeType::BlockDevice
        } else if typ.is_character_special() {
            VfsNodeType::CharDevice
        } else if typ.is_file() {
            VfsNodeType::RegularFile
        } else {
            return Err(anyhow!("unable to determine vfs type for entry"));
        };
        Ok(VfsNode {
            name,
            size: header.size()?,
            children: vec![],
            typ: vtype,
            uid: header.uid()?,
            gid: header.gid()?,
            link_name: header.link_name()?.map(|x| x.to_string_lossy().to_string()),
            mode: header.mode()?,
            mtime: header.mtime()?,
            dev_major: header.device_major()?,
            dev_minor: header.device_minor()?,
            disk_path: None,
        })
    }

    /// Walk `path` component-by-component below this node, returning the
    /// node it names, or `None` if any component is missing.
    pub fn lookup(&self, path: &Path) -> Option<&VfsNode> {
        let mut node = self;
        for part in path {
            node = node
                .children
                .iter()
                .find(|child| child.name == part.to_string_lossy())?;
        }
        Some(node)
    }

    /// Mutable variant of [`VfsNode::lookup`]. An empty path resolves to
    /// this node itself.
    pub fn lookup_mut(&mut self, path: &Path) -> Option<&mut VfsNode> {
        let mut node = self;
        for part in path {
            node = node
                .children
                .iter_mut()
                .find(|child| child.name == part.to_string_lossy())?;
        }
        Some(node)
    }

    /// Detach the node at `path` from its parent.
    ///
    /// Returns the parent node and the removed child, or `None` if the
    /// path, its parent, or the named child cannot be found.
    pub fn remove(&mut self, path: &Path) -> Option<(&mut VfsNode, VfsNode)> {
        let parent = path.parent()?;
        let node = self.lookup_mut(parent)?;
        let file_name = path.file_name()?;
        let file_name = file_name.to_string_lossy();
        let position = node
            .children
            .iter()
            .position(|child| file_name == child.name)?;
        let removed = node.children.remove(position);
        Some((node, removed))
    }

    /// Translate this node's metadata into a ustar header.
    ///
    /// The entry path and checksum are NOT set here; callers (see
    /// `write_to_tar`) must call `set_path` and `set_cksum` themselves.
    pub fn create_tar_header(&self) -> Result<Header> {
        let mut header = Header::new_ustar();
        header.set_entry_type(match self.typ {
            VfsNodeType::Directory => EntryType::Directory,
            VfsNodeType::CharDevice => EntryType::Char,
            VfsNodeType::BlockDevice => EntryType::Block,
            VfsNodeType::Fifo => EntryType::Fifo,
            VfsNodeType::Hardlink => EntryType::Link,
            VfsNodeType::Symlink => EntryType::Symlink,
            VfsNodeType::RegularFile => EntryType::Regular,
        });
        header.set_uid(self.uid);
        header.set_gid(self.gid);
        // Device numbers are only present for char/block device nodes.
        if let Some(device_major) = self.dev_major {
            header.set_device_major(device_major)?;
        }
        if let Some(device_minor) = self.dev_minor {
            header.set_device_minor(device_minor)?;
        }
        header.set_mtime(self.mtime);
        header.set_mode(self.mode);
        if let Some(link_name) = self.link_name.as_ref() {
            header.set_link_name(&PathBuf::from(link_name))?;
        }
        header.set_size(self.size);
        Ok(header)
    }

    /// Append this node to `builder` as a tar entry at `path`.
    ///
    /// Content is streamed from `disk_path` when present; nodes without a
    /// disk path (directories, links, devices) are written with empty
    /// content.
    pub async fn write_to_tar<W: AsyncWrite + Unpin + Send>(
        &self,
        path: &Path,
        builder: &mut Builder<W>,
    ) -> Result<()> {
        let mut header = self.create_tar_header()?;
        header.set_path(path)?;
        header.set_cksum();
        if let Some(disk_path) = self.disk_path.as_ref() {
            builder
                .append(&header, File::open(disk_path).await?)
                .await?;
        } else {
            builder.append(&header, &[] as &[u8]).await?;
        }
        Ok(())
    }
}
/// An in-memory virtual filesystem assembled from tar entries.
#[derive(Clone, Debug)]
pub struct VfsTree {
    /// Root directory; its name is empty and it is never itself written
    /// out when the tree is serialized to tar.
    pub root: VfsNode,
}
impl Default for VfsTree {
fn default() -> Self {
Self::new()
}
}
impl VfsTree {
pub fn new() -> VfsTree {
VfsTree {
root: VfsNode {
name: "".to_string(),
size: 0,
children: vec![],
typ: VfsNodeType::Directory,
uid: 0,
gid: 0,
link_name: None,
mode: 0,
mtime: 0,
dev_major: None,
dev_minor: None,
disk_path: None,
},
}
}
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<()> {
let mut meta = VfsNode::from(entry)?;
let path = entry.path()?.to_path_buf();
let parent = if let Some(parent) = path.parent() {
self.root.lookup_mut(parent)
} else {
Some(&mut self.root)
};
let Some(parent) = parent else {
return Err(anyhow!("unable to find parent of entry"));
};
let position = parent
.children
.iter()
.position(|child| meta.name == child.name);
if let Some(position) = position {
let old = parent.children.remove(position);
if meta.typ == VfsNodeType::Directory {
meta.children = old.children;
}
}
parent.children.push(meta);
Ok(())
}
pub fn set_disk_path(&mut self, path: &Path, disk_path: &Path) -> Result<()> {
let Some(node) = self.root.lookup_mut(path) else {
return Err(anyhow!(
"unable to find node {:?} to set disk path to",
path
));
};
node.disk_path = Some(disk_path.to_path_buf());
Ok(())
}
pub async fn write_to_tar<W: AsyncWrite + Unpin + Send + 'static>(
&self,
write: W,
) -> Result<()> {
let mut builder = Builder::new(write);
let mut queue = vec![(PathBuf::from(""), &self.root)];
while !queue.is_empty() {
let (mut path, node) = queue.remove(0);
if !node.name.is_empty() {
path.push(&node.name);
}
if path.components().count() != 0 {
node.write_to_tar(&path, &mut builder).await?;
}
for child in &node.children {
queue.push((path.clone(), child));
}
}
let mut write = builder.into_inner().await?;
write.flush().await?;
drop(write);
Ok(())
}
}