Compare commits


16 Commits

Author SHA1 Message Date
95fbc62486 chore: release (#87)
Signed-off-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-04-23 09:41:56 +00:00
284ed8f17b feat: implement guest exec (#107) 2024-04-22 20:13:43 +00:00
82576df7b7 feat: implement kernel / initrd oci image support (#103)
* feat: implement kernel / initrd oci image support

* fix: implement image urls more faithfully
2024-04-22 19:48:45 +00:00
1b90eedbcd build(deps): bump thiserror from 1.0.58 to 1.0.59 (#104)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.58 to 1.0.59.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.58...1.0.59)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-22 05:36:20 +00:00
aa941c6e87 build(deps): bump redb from 2.0.0 to 2.1.0 (#106)
Bumps [redb](https://github.com/cberner/redb) from 2.0.0 to 2.1.0.
- [Release notes](https://github.com/cberner/redb/releases)
- [Changelog](https://github.com/cberner/redb/blob/master/CHANGELOG.md)
- [Commits](https://github.com/cberner/redb/compare/v2.0.0...v2.1.0)

---
updated-dependencies:
- dependency-name: redb
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-22 05:36:13 +00:00
d0bf3c4c77 build(deps): bump reqwest from 0.12.3 to 0.12.4 (#105)
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.12.3 to 0.12.4.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.12.3...v0.12.4)

---
updated-dependencies:
- dependency-name: reqwest
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-22 05:36:06 +00:00
38e892e249 feat: idm v2 (#102)
* feat: rebuild idm to separate transport from content

* feat: fast guest lookup table and host identification
2024-04-22 04:00:32 +00:00
1a90372037 build(deps): bump sysinfo from 0.30.10 to 0.30.11 (#99)
Bumps [sysinfo](https://github.com/GuillaumeGomez/sysinfo) from 0.30.10 to 0.30.11.
- [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GuillaumeGomez/sysinfo/compare/v0.30.10...v0.30.11)

---
updated-dependencies:
- dependency-name: sysinfo
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-19 20:35:57 +00:00
4754cdd128 build(deps): bump rustls from 0.22.3 to 0.22.4 (#101)
Bumps [rustls](https://github.com/rustls/rustls) from 0.22.3 to 0.22.4.
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.22.3...v/0.22.4)

---
updated-dependencies:
- dependency-name: rustls
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-19 20:35:44 +00:00
f843abcabf docs(dev): update order of setup instructions (#98)
This change corrects the step order so that the krata daemon is started before the krata networking service.

Co-authored-by: Khionu Sybiern <khionu@edera.dev>
2024-04-18 18:00:48 +00:00
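A minimal sketch of the corrected startup sequence, taken from the DEV.md hunk later in this diff; each script is expected to stay in the foreground, so the two services run in separate terminals.

```sh
# Terminal 1 (step 8): start the krata daemon first and keep it in the foreground
./hack/debug/kratad.sh

# Terminal 2 (step 9): once the daemon is up, start the krata networking service
./hack/debug/kratanet.sh
```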
e8d89d4d5b build(deps): bump serde from 1.0.197 to 1.0.198 (#97)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.197 to 1.0.198.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.197...v1.0.198)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-17 05:17:00 +00:00
4e9738b959 fix: oci cache store should fallback to copy when rename won't work (#96) 2024-04-16 17:05:24 +00:00
8135307283 feat: oci concurrency improvements (#95)
* feat: implement improved and detailed oci progress indication

* feat: implement on-disk indexes of images

* oci: utilize rw-lock for increased cache performance
2024-04-16 16:29:54 +00:00
e450ebd2a2 feat: oci tar format, bit-perfect disk storage for config and manifest, concurrent image pulls (#88)
* oci: retain bit-perfect copies of manifest and config on disk

* feat: oci tar format support

* feat: concurrent image pulls
2024-04-16 08:53:44 +00:00
79f7742caa build(deps): bump serde_json from 1.0.115 to 1.0.116 (#90)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.115 to 1.0.116.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.115...v1.0.116)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-16 07:17:11 +00:00
c3c18271b4 build(deps): bump ratatui from 0.26.1 to 0.26.2 (#89)
Bumps [ratatui](https://github.com/ratatui-org/ratatui) from 0.26.1 to 0.26.2.
- [Release notes](https://github.com/ratatui-org/ratatui/releases)
- [Changelog](https://github.com/ratatui-org/ratatui/blob/main/CHANGELOG.md)
- [Commits](https://github.com/ratatui-org/ratatui/compare/v0.26.1...v0.26.2)

---
updated-dependencies:
- dependency-name: ratatui
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-16 07:16:53 +00:00
68 changed files with 3278 additions and 1141 deletions

View File

@ -6,6 +6,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.0.10](https://github.com/edera-dev/krata/compare/v0.0.9...v0.0.10) - 2024-04-22
### Added
- implement guest exec ([#107](https://github.com/edera-dev/krata/pull/107))
- implement kernel / initrd oci image support ([#103](https://github.com/edera-dev/krata/pull/103))
- idm v2 ([#102](https://github.com/edera-dev/krata/pull/102))
- oci concurrency improvements ([#95](https://github.com/edera-dev/krata/pull/95))
- oci tar format, bit-perfect disk storage for config and manifest, concurrent image pulls ([#88](https://github.com/edera-dev/krata/pull/88))
### Fixed
- oci cache store should fallback to copy when rename won't work ([#96](https://github.com/edera-dev/krata/pull/96))
### Other
- update Cargo.lock dependencies
## [0.0.9](https://github.com/edera-dev/krata/compare/v0.0.8...v0.0.9) - 2024-04-15
### Added

Cargo.lock (generated): 74 changes
View File

@ -1371,7 +1371,7 @@ dependencies = [
[[package]]
name = "krata"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"anyhow",
"async-trait",
@ -1408,10 +1408,11 @@ dependencies = [
[[package]]
name = "krata-ctl"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"anyhow",
"async-stream",
"base64 0.22.0",
"clap",
"comfy-table",
"crossterm",
@ -1425,6 +1426,7 @@ dependencies = [
"prost-reflect",
"prost-types",
"ratatui",
"serde",
"serde_json",
"serde_yaml",
"termtree",
@ -1436,7 +1438,7 @@ dependencies = [
[[package]]
name = "krata-daemon"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"anyhow",
"async-stream",
@ -1449,9 +1451,11 @@ dependencies = [
"krata",
"krata-oci",
"krata-runtime",
"krata-tokio-tar",
"log",
"prost",
"redb",
"scopeguard",
"signal-hook",
"tokio",
"tokio-stream",
@ -1461,7 +1465,7 @@ dependencies = [
[[package]]
name = "krata-guest"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"anyhow",
"cgroups-rs",
@ -1485,7 +1489,7 @@ dependencies = [
[[package]]
name = "krata-network"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"anyhow",
"async-trait",
@ -1509,7 +1513,7 @@ dependencies = [
[[package]]
name = "krata-oci"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"anyhow",
"async-compression",
@ -1536,7 +1540,7 @@ dependencies = [
[[package]]
name = "krata-runtime"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"anyhow",
"backhand",
@ -1573,7 +1577,7 @@ dependencies = [
[[package]]
name = "krata-xencall"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"env_logger",
"libc",
@ -1586,7 +1590,7 @@ dependencies = [
[[package]]
name = "krata-xenclient"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"async-trait",
"elf",
@ -1607,7 +1611,7 @@ dependencies = [
[[package]]
name = "krata-xenevtchn"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"libc",
"log",
@ -1618,7 +1622,7 @@ dependencies = [
[[package]]
name = "krata-xengnt"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"libc",
"nix 0.28.0",
@ -1627,7 +1631,7 @@ dependencies = [
[[package]]
name = "krata-xenstore"
version = "0.0.9"
version = "0.0.10"
dependencies = [
"byteorder",
"env_logger",
@ -2264,9 +2268,9 @@ dependencies = [
[[package]]
name = "ratatui"
version = "0.26.1"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcb12f8fbf6c62614b0d56eb352af54f6a22410c3b079eb53ee93c7b97dd31d8"
checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80"
dependencies = [
"bitflags 2.5.0",
"cassowary",
@ -2304,9 +2308,9 @@ dependencies = [
[[package]]
name = "redb"
version = "2.0.0"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1100a056c5dcdd4e5513d5333385223b26ef1bf92f31eb38f407e8c20549256"
checksum = "ed7508e692a49b6b2290b56540384ccae9b1fb4d77065640b165835b56ffe3bb"
dependencies = [
"libc",
]
@ -2360,9 +2364,9 @@ checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "reqwest"
version = "0.12.3"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e6cc1e89e689536eb5aeede61520e874df5a4707df811cd5da4aa5fbb2aae19"
checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10"
dependencies = [
"base64 0.22.0",
"bytes",
@ -2459,9 +2463,9 @@ dependencies = [
[[package]]
name = "rustls"
version = "0.22.3"
version = "0.22.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c"
checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
dependencies = [
"log",
"ring",
@ -2527,9 +2531,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.197"
version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc"
dependencies = [
"serde_derive",
]
@ -2546,9 +2550,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.197"
version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9"
dependencies = [
"proc-macro2",
"quote",
@ -2557,9 +2561,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.115"
version = "1.0.116"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd"
checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
dependencies = [
"itoa",
"ryu",
@ -2717,12 +2721,12 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
[[package]]
name = "stability"
version = "0.1.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce"
checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a"
dependencies = [
"quote",
"syn 1.0.109",
"syn 2.0.57",
]
[[package]]
@ -2820,9 +2824,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.30.10"
version = "0.30.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d7c217777061d5a2d652aea771fb9ba98b6dade657204b08c4b9604d11555b"
checksum = "87341a165d73787554941cd5ef55ad728011566fe714e987d1b976c15dbc3a83"
dependencies = [
"cfg-if",
"core-foundation-sys",
@ -2859,18 +2863,18 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "thiserror"
version = "1.0.58"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.58"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"
dependencies = [
"proc-macro2",
"quote",

View File

@ -16,7 +16,7 @@ members = [
resolver = "2"
[workspace.package]
version = "0.0.9"
version = "0.0.10"
homepage = "https://krata.dev"
license = "Apache-2.0"
repository = "https://github.com/edera-dev/krata"
@ -28,6 +28,7 @@ async-compression = "0.4.8"
async-stream = "0.3.5"
async-trait = "0.1.80"
backhand = "0.15.0"
base64 = "0.22.0"
byteorder = "1"
bytes = "1.5.0"
cgroups-rs = "0.3.4"
@ -61,17 +62,17 @@ prost-build = "0.12.4"
prost-reflect-build = "0.13.0"
prost-types = "0.12.4"
rand = "0.8.5"
ratatui = "0.26.1"
redb = "2.0.0"
ratatui = "0.26.2"
redb = "2.1.0"
rtnetlink = "0.14.1"
scopeguard = "1.2.0"
serde_json = "1.0.113"
serde_json = "1.0.116"
serde_yaml = "0.9"
sha256 = "1.5.0"
signal-hook = "0.3.17"
slice-copy = "0.3.0"
smoltcp = "0.11.0"
sysinfo = "0.30.10"
sysinfo = "0.30.11"
termtree = "0.4.1"
thiserror = "1.0"
tokio-tun = "0.11.4"
@ -91,12 +92,12 @@ version = "0.13.1"
features = ["derive"]
[workspace.dependencies.reqwest]
version = "0.12.3"
version = "0.12.4"
default-features = false
features = ["rustls-tls"]
[workspace.dependencies.serde]
version = "1.0.196"
version = "1.0.198"
features = ["derive"]
[workspace.dependencies.sys-mount]

DEV.md: 4 changes
View File

@ -69,8 +69,8 @@ $ ./hack/kernel/build.sh
```
7. Copy the guest kernel image at `target/kernel/kernel-x86_64` to `/var/lib/krata/guest/kernel` to have it automatically detected by kratad.
8. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
9. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
8. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
9. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
10. Run kratactl to launch a guest:
```sh

View File

@ -11,6 +11,7 @@ resolver = "2"
[dependencies]
anyhow = { workspace = true }
async-stream = { workspace = true }
base64 = { workspace = true }
clap = { workspace = true }
comfy-table = { workspace = true }
crossterm = { workspace = true, features = ["event-stream"] }
@ -19,11 +20,12 @@ env_logger = { workspace = true }
fancy-duration = { workspace = true }
human_bytes = { workspace = true }
indicatif = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.10" }
log = { workspace = true }
prost-reflect = { workspace = true, features = ["serde"] }
prost-types = { workspace = true }
ratatui = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
termtree = { workspace = true }

View File

@ -0,0 +1,70 @@
use std::collections::HashMap;
use anyhow::Result;
use clap::Parser;
use krata::v1::{
common::{GuestTaskSpec, GuestTaskSpecEnvVar},
control::{control_service_client::ControlServiceClient, ExecGuestRequest},
};
use tonic::{transport::Channel, Request};
use crate::console::StdioConsoleStream;
use super::resolve_guest;
#[derive(Parser)]
#[command(about = "Execute a command inside the guest")]
pub struct ExecCommand {
#[arg[short, long, help = "Environment variables"]]
env: Option<Vec<String>>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(help = "Guest to exec inside, either the name or the uuid")]
guest: String,
#[arg(
allow_hyphen_values = true,
trailing_var_arg = true,
help = "Command to run inside the guest"
)]
command: Vec<String>,
}
impl ExecCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
let initial = ExecGuestRequest {
guest_id,
task: Some(GuestTaskSpec {
environment: env_map(&self.env.unwrap_or_default())
.iter()
.map(|(key, value)| GuestTaskSpecEnvVar {
key: key.clone(),
value: value.clone(),
})
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
}),
data: vec![],
};
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
let response = client.exec_guest(Request::new(stream)).await?.into_inner();
let code = StdioConsoleStream::exec_output(response).await?;
std::process::exit(code);
}
}
fn env_map(env: &[String]) -> HashMap<String, String> {
let mut map = HashMap::<String, String>::new();
for item in env {
if let Some((key, value)) = item.split_once('=') {
map.insert(key.to_string(), value.to_string());
}
}
map
}
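A hedged usage sketch of the new exec subcommand, assuming the kratactl binary name and the kebab-case subcommand clap derives from ExecCommand; the flags mirror the arguments above (--env from the short/long attribute, -w/--working-directory), and the guest name and commands are placeholders.

```sh
# Run a command inside a running guest (guest name/uuid and command are placeholders)
kratactl exec my-guest -- uname -a

# Pass KEY=VALUE environment variables (split on '=' by env_map) and a working directory
kratactl exec --env FOO=bar --env BAZ=qux -w /tmp my-guest -- env
```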

View File

@ -0,0 +1,22 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::control::{control_service_client::ControlServiceClient, IdentifyHostRequest};
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(about = "Identify information about the host")]
pub struct IdentifyHostCommand {}
impl IdentifyHostCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.identify_host(Request::new(IdentifyHostRequest {}))
.await?
.into_inner();
println!("Host UUID: {}", response.host_uuid);
println!("Host Domain: {}", response.host_domid);
println!("Krata Version: {}", response.krata_version);
Ok(())
}
}
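A hedged invocation sketch for the new identify-host command, assuming the kratactl binary and the subcommand name clap derives from IdentifyHostCommand; the printed values are illustrative, but the three fields match the println! calls above.

```sh
# Ask the daemon to identify itself
kratactl identify-host
# Illustrative output shape:
#   Host UUID: 00000000-0000-0000-0000-000000000000
#   Host Domain: 0
#   Krata Version: 0.0.10
```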

View File

@ -1,14 +1,18 @@
use anyhow::Result;
use base64::Engine;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
idm::{internal, serialize::IdmSerializable, transport::IdmTransportPacketForm},
v1::control::{control_service_client::ControlServiceClient, SnoopIdmReply, SnoopIdmRequest},
};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio_stream::StreamExt;
use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, proto2kv};
use crate::format::{kv2line, proto2dynamic, value2kv};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum IdmSnoopFormat {
@ -34,19 +38,22 @@ impl IdmSnoopCommand {
while let Some(reply) = stream.next().await {
let reply = reply?;
let Some(line) = convert_idm_snoop(reply) else {
continue;
};
match self.format {
IdmSnoopFormat::Simple => {
self.print_simple(reply)?;
self.print_simple(line)?;
}
IdmSnoopFormat::Jsonl => {
let value = serde_json::to_value(proto2dynamic(reply)?)?;
let encoded = serde_json::to_string(&value)?;
let encoded = serde_json::to_string(&line)?;
println!("{}", encoded.trim());
}
IdmSnoopFormat::KeyValue => {
self.print_key_value(reply)?;
self.print_key_value(line)?;
}
}
}
@ -54,21 +61,97 @@ impl IdmSnoopCommand {
Ok(())
}
fn print_simple(&self, reply: SnoopIdmReply) -> Result<()> {
let from = reply.from;
let to = reply.to;
let Some(packet) = reply.packet else {
return Ok(());
fn print_simple(&self, line: IdmSnoopLine) -> Result<()> {
let encoded = if !line.packet.decoded.is_null() {
serde_json::to_string(&line.packet.decoded)?
} else {
base64::prelude::BASE64_STANDARD.encode(&line.packet.data)
};
let value = serde_json::to_value(proto2dynamic(packet)?)?;
let encoded = serde_json::to_string(&value)?;
println!("({} -> {}) {}", from, to, encoded);
println!(
"({} -> {}) {} {} {}",
line.from, line.to, line.packet.id, line.packet.form, encoded
);
Ok(())
}
fn print_key_value(&self, reply: SnoopIdmReply) -> Result<()> {
let kvs = proto2kv(reply)?;
fn print_key_value(&self, line: IdmSnoopLine) -> Result<()> {
let kvs = value2kv(serde_json::to_value(line)?)?;
println!("{}", kv2line(kvs));
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct IdmSnoopLine {
pub from: String,
pub to: String,
pub packet: IdmSnoopData,
}
#[derive(Serialize, Deserialize)]
pub struct IdmSnoopData {
pub id: u64,
pub channel: u64,
pub form: String,
pub data: String,
pub decoded: Value,
}
pub fn convert_idm_snoop(reply: SnoopIdmReply) -> Option<IdmSnoopLine> {
let packet = &(reply.packet?);
let decoded = if packet.channel == 0 {
match packet.form() {
IdmTransportPacketForm::Event => internal::Event::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok()),
IdmTransportPacketForm::Request
| IdmTransportPacketForm::StreamRequest
| IdmTransportPacketForm::StreamRequestUpdate => {
internal::Request::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok())
}
IdmTransportPacketForm::Response | IdmTransportPacketForm::StreamResponseUpdate => {
internal::Response::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok())
}
_ => None,
}
} else {
None
};
let decoded = decoded
.and_then(|message| serde_json::to_value(message).ok())
.unwrap_or(Value::Null);
let data = IdmSnoopData {
id: packet.id,
channel: packet.channel,
form: match packet.form() {
IdmTransportPacketForm::Raw => "raw".to_string(),
IdmTransportPacketForm::Event => "event".to_string(),
IdmTransportPacketForm::Request => "request".to_string(),
IdmTransportPacketForm::Response => "response".to_string(),
IdmTransportPacketForm::StreamRequest => "stream-request".to_string(),
IdmTransportPacketForm::StreamRequestUpdate => "stream-request-update".to_string(),
IdmTransportPacketForm::StreamRequestClosed => "stream-request-closed".to_string(),
IdmTransportPacketForm::StreamResponseUpdate => "stream-response-update".to_string(),
IdmTransportPacketForm::StreamResponseClosed => "stream-response-closed".to_string(),
_ => format!("unknown-{}", packet.form),
},
data: base64::prelude::BASE64_STANDARD.encode(&packet.data),
decoded,
};
Some(IdmSnoopLine {
from: reply.from,
to: reply.to,
packet: data,
})
}
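A hedged sketch of the reworked snoop output, assuming the kratactl binary and a --format flag for the IdmSnoopFormat value enum (the flag name is not shown in this hunk); the UUIDs and payloads are illustrative. Each simple-format line follows print_simple above: (from -> to), then the packet id, form, and either the decoded JSON or the base64 data.

```sh
# Snoop IDM traffic in the simple format
kratactl idm-snoop
# Illustrative line: (from-uuid -> to-uuid) id form decoded-or-base64
#   (0b9d...-uuid -> 7f2a...-uuid) 42 request {"metrics":{}}

# The key-value format flattens the same line via value2kv/kv2line into dotted keys
kratactl idm-snoop --format key-value
#   from="0b9d...-uuid" to="7f2a...-uuid" packet.id="42" packet.form="request" ...
```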

View File

@ -1,13 +1,13 @@
use std::collections::HashMap;
use anyhow::Result;
use clap::Parser;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
v1::{
common::{
guest_image_spec::Image, GuestImageSpec, GuestOciImageFormat, GuestOciImageSpec,
GuestSpec, GuestStatus, GuestTaskSpec, GuestTaskSpecEnvVar,
guest_image_spec::Image, GuestImageSpec, GuestOciImageSpec, GuestSpec, GuestStatus,
GuestTaskSpec, GuestTaskSpecEnvVar, OciImageFormat,
},
control::{
control_service_client::ControlServiceClient, watch_events_reply::Event,
@ -21,13 +21,19 @@ use tonic::{transport::Channel, Request};
use crate::{console::StdioConsoleStream, pull::pull_interactive_progress};
use super::pull::PullImageFormat;
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
pub enum LaunchImageFormat {
Squashfs,
Erofs,
}
#[derive(Parser)]
#[command(about = "Launch a new guest")]
pub struct LauchCommand {
#[arg(short = 'S', long, default_value = "squashfs", help = "Image format")]
image_format: PullImageFormat,
pub struct LaunchCommand {
#[arg(long, default_value = "squashfs", help = "Image format")]
image_format: LaunchImageFormat,
#[arg(long, help = "Overwrite image cache on pull")]
pull_overwrite_cache: bool,
#[arg(short, long, help = "Name of the guest")]
name: Option<String>,
#[arg(
@ -58,6 +64,12 @@ pub struct LauchCommand {
help = "Wait for the guest to start, implied by --attach"
)]
wait: bool,
#[arg(short = 'k', long, help = "OCI kernel image for guest to use")]
kernel: Option<String>,
#[arg(short = 'I', long, help = "OCI initrd image for guest to use")]
initrd: Option<String>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(help = "Container image for guest to use")]
oci: String,
#[arg(
@ -68,32 +80,47 @@ pub struct LauchCommand {
command: Vec<String>,
}
impl LauchCommand {
impl LaunchCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
let response = client
.pull_image(PullImageRequest {
image: self.oci.clone(),
format: match self.image_format {
PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
let image = self
.pull_image(
&mut client,
&self.oci,
match self.image_format {
LaunchImageFormat::Squashfs => OciImageFormat::Squashfs,
LaunchImageFormat::Erofs => OciImageFormat::Erofs,
},
})
)
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;
let kernel = if let Some(ref kernel) = self.kernel {
let kernel_image = self
.pull_image(&mut client, kernel, OciImageFormat::Tar)
.await?;
Some(kernel_image)
} else {
None
};
let initrd = if let Some(ref initrd) = self.initrd {
let kernel_image = self
.pull_image(&mut client, initrd, OciImageFormat::Tar)
.await?;
Some(kernel_image)
} else {
None
};
let request = CreateGuestRequest {
spec: Some(GuestSpec {
name: self.name.unwrap_or_default(),
image: Some(GuestImageSpec {
image: Some(Image::Oci(GuestOciImageSpec {
digest: reply.digest,
format: reply.format,
})),
}),
image: Some(image),
kernel,
initrd,
vcpus: self.cpus,
mem: self.mem,
task: Some(GuestTaskSpec {
@ -105,6 +132,7 @@ impl LauchCommand {
})
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
}),
annotations: vec![],
}),
@ -139,6 +167,28 @@ impl LauchCommand {
StdioConsoleStream::restore_terminal_mode();
std::process::exit(code.unwrap_or(0));
}
async fn pull_image(
&self,
client: &mut ControlServiceClient<Channel>,
image: &str,
format: OciImageFormat,
) -> Result<GuestImageSpec> {
let response = client
.pull_image(PullImageRequest {
image: image.to_string(),
format: format.into(),
overwrite_cache: self.pull_overwrite_cache,
})
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;
Ok(GuestImageSpec {
image: Some(Image::Oci(GuestOciImageSpec {
digest: reply.digest,
format: reply.format,
})),
})
}
}
async fn wait_guest_started(id: &str, events: EventStream) -> Result<()> {
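A hedged sketch of launching a guest with the new kernel/initrd OCI image support, assuming the kratactl binary; the flags mirror the clap arguments above (-k/--kernel, -I/--initrd, --pull-overwrite-cache, -w/--working-directory), and all image references are placeholders.

```sh
# Launch a guest, pulling the kernel and initrd as tar-format OCI images (image refs are placeholders)
kratactl launch \
  --kernel ghcr.io/example/guest-kernel:latest \
  --initrd ghcr.io/example/guest-initrd:latest \
  --pull-overwrite-cache \
  -w /srv \
  alpine:latest -- /bin/sh
```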

View File

@ -1,5 +1,7 @@
pub mod attach;
pub mod destroy;
pub mod exec;
pub mod identify_host;
pub mod idm_snoop;
pub mod launch;
pub mod list;
@ -20,9 +22,10 @@ use krata::{
use tonic::{transport::Channel, Request};
use self::{
attach::AttachCommand, destroy::DestroyCommand, idm_snoop::IdmSnoopCommand,
launch::LauchCommand, list::ListCommand, logs::LogsCommand, metrics::MetricsCommand,
pull::PullCommand, resolve::ResolveCommand, top::TopCommand, watch::WatchCommand,
attach::AttachCommand, destroy::DestroyCommand, exec::ExecCommand,
identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand, launch::LaunchCommand,
list::ListCommand, logs::LogsCommand, metrics::MetricsCommand, pull::PullCommand,
resolve::ResolveCommand, top::TopCommand, watch::WatchCommand,
};
#[derive(Parser)]
@ -45,7 +48,7 @@ pub struct ControlCommand {
#[derive(Subcommand)]
pub enum Commands {
Launch(LauchCommand),
Launch(LaunchCommand),
Destroy(DestroyCommand),
List(ListCommand),
Attach(AttachCommand),
@ -56,6 +59,8 @@ pub enum Commands {
Metrics(MetricsCommand),
IdmSnoop(IdmSnoopCommand),
Top(TopCommand),
IdentifyHost(IdentifyHostCommand),
Exec(ExecCommand),
}
impl ControlCommand {
@ -107,6 +112,14 @@ impl ControlCommand {
Commands::Pull(pull) => {
pull.run(client).await?;
}
Commands::IdentifyHost(identify) => {
identify.run(client).await?;
}
Commands::Exec(exec) => {
exec.run(client).await?;
}
}
Ok(())
}

View File

@ -1,7 +1,7 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::v1::{
common::GuestOciImageFormat,
common::OciImageFormat,
control::{control_service_client::ControlServiceClient, PullImageRequest},
};
@ -13,6 +13,7 @@ use crate::pull::pull_interactive_progress;
pub enum PullImageFormat {
Squashfs,
Erofs,
Tar,
}
#[derive(Parser)]
@ -22,6 +23,8 @@ pub struct PullCommand {
image: String,
#[arg(short = 's', long, default_value = "squashfs", help = "Image format")]
image_format: PullImageFormat,
#[arg(short = 'o', long, help = "Overwrite image cache")]
overwrite_cache: bool,
}
impl PullCommand {
@ -30,9 +33,11 @@ impl PullCommand {
.pull_image(PullImageRequest {
image: self.image.clone(),
format: match self.image_format {
PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
PullImageFormat::Squashfs => OciImageFormat::Squashfs.into(),
PullImageFormat::Erofs => OciImageFormat::Erofs.into(),
PullImageFormat::Tar => OciImageFormat::Tar.into(),
},
overwrite_cache: self.overwrite_cache,
})
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;
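A hedged usage sketch of the extended pull command, assuming the kratactl binary and clap's default long-flag names; the new tar format and cache-overwrite flag come from the arguments above, and the image reference is a placeholder.

```sh
# Pull an image packed in the new tar format, bypassing the local cache
kratactl pull -s tar -o alpine:latest

# Equivalent long-form flags
kratactl pull --image-format tar --overwrite-cache alpine:latest
```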

View File

@ -1,4 +1,4 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use async_stream::stream;
use crossterm::{
terminal::{disable_raw_mode, enable_raw_mode, is_raw_mode_enabled},
@ -8,12 +8,15 @@ use krata::{
events::EventStream,
v1::{
common::GuestStatus,
control::{watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest},
control::{
watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest, ExecGuestReply,
ExecGuestRequest,
},
},
};
use log::debug;
use tokio::{
io::{stdin, stdout, AsyncReadExt, AsyncWriteExt},
io::{stderr, stdin, stdout, AsyncReadExt, AsyncWriteExt},
task::JoinHandle,
};
use tokio_stream::{Stream, StreamExt};
@ -45,6 +48,31 @@ impl StdioConsoleStream {
}
}
pub async fn stdin_stream_exec(
initial: ExecGuestRequest,
) -> impl Stream<Item = ExecGuestRequest> {
let mut stdin = stdin();
stream! {
yield initial;
let mut buffer = vec![0u8; 60];
loop {
let size = match stdin.read(&mut buffer).await {
Ok(size) => size,
Err(error) => {
debug!("failed to read stdin: {}", error);
break;
}
};
let data = buffer[0..size].to_vec();
if size == 1 && buffer[0] == 0x1d {
break;
}
yield ExecGuestRequest { guest_id: String::default(), task: None, data };
}
}
}
pub async fn stdout(mut stream: Streaming<ConsoleDataReply>) -> Result<()> {
if stdin().is_tty() {
enable_raw_mode()?;
@ -62,6 +90,32 @@ impl StdioConsoleStream {
Ok(())
}
pub async fn exec_output(mut stream: Streaming<ExecGuestReply>) -> Result<i32> {
let mut stdout = stdout();
let mut stderr = stderr();
while let Some(reply) = stream.next().await {
let reply = reply?;
if !reply.stdout.is_empty() {
stdout.write_all(&reply.stdout).await?;
stdout.flush().await?;
}
if !reply.stderr.is_empty() {
stderr.write_all(&reply.stderr).await?;
stderr.flush().await?;
}
if reply.exited {
if reply.error.is_empty() {
return Ok(reply.exit_code);
} else {
return Err(anyhow!("exec failed: {}", reply.error));
}
}
}
Ok(-1)
}
pub async fn guest_exit_hook(
id: String,
events: EventStream,

View File

@ -4,7 +4,7 @@ use anyhow::Result;
use fancy_duration::FancyDuration;
use human_bytes::human_bytes;
use krata::v1::common::{Guest, GuestMetricFormat, GuestMetricNode, GuestStatus};
use prost_reflect::{DynamicMessage, FieldDescriptor, ReflectMessage, Value as ReflectValue};
use prost_reflect::{DynamicMessage, ReflectMessage};
use prost_types::Value;
use termtree::Tree;
@ -15,64 +15,59 @@ pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
)?)
}
pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
let message = proto2dynamic(proto)?;
pub fn value2kv(value: serde_json::Value) -> Result<HashMap<String, String>> {
let mut map = HashMap::new();
fn crawl(prefix: String, map: &mut HashMap<String, String>, value: serde_json::Value) {
fn dot(prefix: &str, next: String) -> String {
if prefix.is_empty() {
next.to_string()
} else {
format!("{}.{}", prefix, next)
}
}
fn crawl(
prefix: String,
field: Option<&FieldDescriptor>,
map: &mut HashMap<String, String>,
value: &ReflectValue,
) {
match value {
ReflectValue::Message(child) => {
for (field, field_value) in child.fields() {
let path = if prefix.is_empty() {
field.json_name().to_string()
} else {
format!("{}.{}", prefix, field.json_name())
};
crawl(path, Some(&field), map, field_value);
serde_json::Value::Null => {
map.insert(prefix, "null".to_string());
}
serde_json::Value::String(value) => {
map.insert(prefix, value);
}
serde_json::Value::Bool(value) => {
map.insert(prefix, value.to_string());
}
serde_json::Value::Number(value) => {
map.insert(prefix, value.to_string());
}
serde_json::Value::Array(value) => {
for (i, item) in value.into_iter().enumerate() {
let next = dot(&prefix, i.to_string());
crawl(next, map, item);
}
}
ReflectValue::EnumNumber(number) => {
if let Some(kind) = field.map(|x| x.kind()) {
if let Some(e) = kind.as_enum() {
if let Some(value) = e.get_value(*number) {
map.insert(prefix, value.name().to_string());
}
}
serde_json::Value::Object(value) => {
for (key, item) in value {
let next = dot(&prefix, key);
crawl(next, map, item);
}
}
ReflectValue::String(value) => {
map.insert(prefix.to_string(), value.clone());
}
ReflectValue::List(value) => {
for (x, value) in value.iter().enumerate() {
crawl(format!("{}.{}", prefix, x), field, map, value);
}
}
_ => {
map.insert(prefix.to_string(), value.to_string());
}
}
}
crawl(
"".to_string(),
None,
&mut map,
&ReflectValue::Message(message),
);
crawl("".to_string(), &mut map, value);
Ok(map)
}
pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
let message = proto2dynamic(proto)?;
let value = serde_json::to_value(message)?;
value2kv(value)
}
pub fn kv2line(map: HashMap<String, String>) -> String {
map.iter()
.map(|(k, v)| format!("{}=\"{}\"", k, v.replace('"', "\\\"")))

View File

@ -1,20 +1,205 @@
use std::collections::HashMap;
use std::{
collections::{hash_map::Entry, HashMap},
time::Duration,
};
use anyhow::{anyhow, Result};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use krata::v1::control::{PullImageProgressLayerPhase, PullImageProgressPhase, PullImageReply};
use krata::v1::control::{
image_progress_indication::Indication, ImageProgressIndication, ImageProgressLayerPhase,
ImageProgressPhase, PullImageReply,
};
use tokio_stream::StreamExt;
use tonic::Streaming;
const SPINNER_STRINGS: &[&str] = &[
"[= ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ =]",
"[====================]",
];
fn progress_bar_for_indication(indication: &ImageProgressIndication) -> Option<ProgressBar> {
match indication.indication.as_ref() {
Some(Indication::Hidden(_)) | None => None,
Some(Indication::Bar(indic)) => {
let bar = ProgressBar::new(indic.total);
bar.enable_steady_tick(Duration::from_millis(100));
Some(bar)
}
Some(Indication::Spinner(_)) => {
let bar = ProgressBar::new_spinner();
bar.enable_steady_tick(Duration::from_millis(100));
Some(bar)
}
Some(Indication::Completed(indic)) => {
let bar = ProgressBar::new_spinner();
bar.enable_steady_tick(Duration::from_millis(100));
if !indic.message.is_empty() {
bar.finish_with_message(indic.message.clone());
} else {
bar.finish()
}
Some(bar)
}
}
}
fn configure_for_indication(
bar: &mut ProgressBar,
multi_progress: &mut MultiProgress,
indication: &ImageProgressIndication,
top_phase: Option<ImageProgressPhase>,
layer_phase: Option<ImageProgressLayerPhase>,
layer_id: Option<&str>,
) {
let prefix = if let Some(phase) = top_phase {
match phase {
ImageProgressPhase::Unknown => "unknown",
ImageProgressPhase::Started => "started",
ImageProgressPhase::Resolving => "resolving",
ImageProgressPhase::Resolved => "resolved",
ImageProgressPhase::ConfigDownload => "downloading",
ImageProgressPhase::LayerDownload => "downloading",
ImageProgressPhase::Assemble => "assembling",
ImageProgressPhase::Pack => "packing",
ImageProgressPhase::Complete => "complete",
}
} else if let Some(phase) = layer_phase {
match phase {
ImageProgressLayerPhase::Unknown => "unknown",
ImageProgressLayerPhase::Waiting => "waiting",
ImageProgressLayerPhase::Downloading => "downloading",
ImageProgressLayerPhase::Downloaded => "downloaded",
ImageProgressLayerPhase::Extracting => "extracting",
ImageProgressLayerPhase::Extracted => "extracted",
}
} else {
""
};
let prefix = prefix.to_string();
let id = if let Some(layer_id) = layer_id {
let hash = if let Some((_, hash)) = layer_id.split_once(':') {
hash
} else {
"unknown"
};
let small_hash = if hash.len() > 10 { &hash[0..10] } else { hash };
Some(format!("{:width$}", small_hash, width = 10))
} else {
None
};
let prefix = if let Some(id) = id {
format!("{} {:width$}", id, prefix, width = 11)
} else {
format!(" {:width$}", prefix, width = 11)
};
match indication.indication.as_ref() {
Some(Indication::Hidden(_)) | None => {
multi_progress.remove(bar);
return;
}
Some(Indication::Bar(indic)) => {
if indic.is_bytes {
bar.set_style(ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) eta: {eta}").unwrap().progress_chars("=>-"));
} else {
bar.set_style(
ProgressStyle::with_template(
"{prefix} [{bar:20} {msg} {human_pos}/{human_len} ({per_sec}/sec)",
)
.unwrap()
.progress_chars("=>-"),
);
}
bar.set_message(indic.message.clone());
bar.set_position(indic.current);
bar.set_length(indic.total);
}
Some(Indication::Spinner(indic)) => {
bar.set_style(
ProgressStyle::with_template("{prefix} {spinner} {msg}")
.unwrap()
.tick_strings(SPINNER_STRINGS),
);
bar.set_message(indic.message.clone());
}
Some(Indication::Completed(indic)) => {
if bar.is_finished() {
return;
}
bar.disable_steady_tick();
bar.set_message(indic.message.clone());
if indic.total != 0 {
bar.set_position(indic.total);
bar.set_length(indic.total);
}
if bar.style().get_tick_str(0).contains('=') {
bar.set_style(
ProgressStyle::with_template("{prefix} {spinner} {msg}")
.unwrap()
.tick_strings(SPINNER_STRINGS),
);
bar.finish_with_message(indic.message.clone());
} else if indic.is_bytes {
bar.set_style(
ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_total_bytes}")
.unwrap()
.progress_chars("=>-"),
);
} else {
bar.set_style(
ProgressStyle::with_template("{prefix} [{bar:20}] {msg}")
.unwrap()
.progress_chars("=>-"),
);
}
bar.tick();
bar.enable_steady_tick(Duration::from_millis(100));
}
};
bar.set_prefix(prefix);
bar.tick();
}
pub async fn pull_interactive_progress(
mut stream: Streaming<PullImageReply>,
) -> Result<PullImageReply> {
let mut multi_progress: Option<(MultiProgress, HashMap<String, ProgressBar>)> = None;
let mut multi_progress = MultiProgress::new();
multi_progress.set_move_cursor(false);
let mut progresses = HashMap::new();
while let Some(reply) = stream.next().await {
let reply = reply?;
let reply = match reply {
Ok(reply) => reply,
Err(error) => {
multi_progress.clear()?;
return Err(error.into());
}
};
if reply.progress.is_none() && !reply.digest.is_empty() {
multi_progress.clear()?;
return Ok(reply);
}
@ -22,97 +207,62 @@ pub async fn pull_interactive_progress(
continue;
};
if multi_progress.is_none() {
multi_progress = Some((MultiProgress::new(), HashMap::new()));
for layer in &oci.layers {
let Some(ref indication) = layer.indication else {
continue;
};
let bar = match progresses.entry(layer.id.clone()) {
Entry::Occupied(entry) => Some(entry.into_mut()),
Entry::Vacant(entry) => {
if let Some(bar) = progress_bar_for_indication(indication) {
multi_progress.add(bar.clone());
Some(entry.insert(bar))
} else {
None
}
}
};
if let Some(bar) = bar {
configure_for_indication(
bar,
&mut multi_progress,
indication,
None,
Some(layer.phase()),
Some(&layer.id),
);
}
}
let Some((multi_progress, progresses)) = multi_progress.as_mut() else {
continue;
};
if let Some(ref indication) = oci.indication {
let bar = match progresses.entry("root".to_string()) {
Entry::Occupied(entry) => Some(entry.into_mut()),
match oci.phase() {
PullImageProgressPhase::Resolved
| PullImageProgressPhase::ConfigAcquire
| PullImageProgressPhase::LayerAcquire => {
if progresses.is_empty() && !oci.layers.is_empty() {
for layer in &oci.layers {
let bar = ProgressBar::new(layer.total);
bar.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
progresses.insert(layer.id.clone(), bar.clone());
multi_progress.add(bar);
}
}
for layer in oci.layers {
let Some(progress) = progresses.get_mut(&layer.id) else {
continue;
};
let phase = match layer.phase() {
PullImageProgressLayerPhase::Waiting => "waiting",
PullImageProgressLayerPhase::Downloading => "downloading",
PullImageProgressLayerPhase::Downloaded => "downloaded",
PullImageProgressLayerPhase::Extracting => "extracting",
PullImageProgressLayerPhase::Extracted => "extracted",
_ => "unknown",
};
let simple = if let Some((_, hash)) = layer.id.split_once(':') {
hash
Entry::Vacant(entry) => {
if let Some(bar) = progress_bar_for_indication(indication) {
multi_progress.add(bar.clone());
Some(entry.insert(bar))
} else {
"unknown"
};
let simple = if simple.len() > 10 {
&simple[0..10]
} else {
simple
};
let message = format!(
"{:width$} {:phwidth$}",
simple,
phase,
width = 10,
phwidth = 11
);
if message != progress.message() {
progress.set_message(message);
None
}
progress.update(|state| {
state.set_len(layer.total);
state.set_pos(layer.value);
});
}
};
if let Some(bar) = bar {
configure_for_indication(
bar,
&mut multi_progress,
indication,
Some(oci.phase()),
None,
None,
);
}
PullImageProgressPhase::Packing => {
for (key, bar) in &mut *progresses {
if key == "packing" {
continue;
}
bar.finish_and_clear();
multi_progress.remove(bar);
}
progresses.retain(|k, _| k == "packing");
if progresses.is_empty() {
let progress = ProgressBar::new(100);
progress.set_message("packing ");
progress.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
progresses.insert("packing".to_string(), progress);
}
let Some(progress) = progresses.get("packing") else {
continue;
};
progress.update(|state| {
state.set_len(oci.total);
state.set_pos(oci.value);
});
}
_ => {}
}
}
multi_progress.clear()?;
Err(anyhow!("never received final reply for image pull"))
}

View File

@ -17,15 +17,17 @@ circular-buffer = { workspace = true }
clap = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata-oci = { path = "../oci", version = "^0.0.9" }
krata-runtime = { path = "../runtime", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.10" }
krata-oci = { path = "../oci", version = "^0.0.10" }
krata-runtime = { path = "../runtime", version = "^0.0.10" }
log = { workspace = true }
prost = { workspace = true }
redb = { workspace = true }
scopeguard = { workspace = true }
signal-hook = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
krata-tokio-tar = { workspace = true }
tonic = { workspace = true, features = ["tls"] }
uuid = { workspace = true }

View File

@ -1,21 +1,9 @@
use anyhow::Result;
use clap::Parser;
use env_logger::Env;
use krata::dial::ControlDialAddress;
use kratad::Daemon;
use kratad::command::DaemonCommand;
use log::LevelFilter;
use std::{
str::FromStr,
sync::{atomic::AtomicBool, Arc},
};
#[derive(Parser)]
struct DaemonCommand {
#[arg(short, long, default_value = "unix:///var/lib/krata/daemon.socket")]
listen: String,
#[arg(short, long, default_value = "/var/lib/krata")]
store: String,
}
use std::sync::{atomic::AtomicBool, Arc};
#[tokio::main(flavor = "multi_thread", worker_threads = 10)]
async fn main() -> Result<()> {
@ -24,12 +12,8 @@ async fn main() -> Result<()> {
.init();
mask_sighup()?;
let args = DaemonCommand::parse();
let addr = ControlDialAddress::from_str(&args.listen)?;
let mut daemon = Daemon::new(args.store.clone()).await?;
daemon.listen(addr).await?;
Ok(())
let command = DaemonCommand::parse();
command.run().await
}
fn mask_sighup() -> Result<()> {

View File

@ -0,0 +1,36 @@
use anyhow::Result;
use clap::{CommandFactory, Parser};
use krata::dial::ControlDialAddress;
use std::str::FromStr;
use crate::Daemon;
#[derive(Parser)]
#[command(version, about = "Krata hypervisor daemon")]
pub struct DaemonCommand {
#[arg(
short,
long,
default_value = "unix:///var/lib/krata/daemon.socket",
help = "Listen address"
)]
listen: String,
#[arg(short, long, default_value = "/var/lib/krata", help = "Storage path")]
store: String,
}
impl DaemonCommand {
pub async fn run(self) -> Result<()> {
let addr = ControlDialAddress::from_str(&self.listen)?;
let mut daemon = Daemon::new(self.store.clone()).await?;
daemon.listen(addr).await?;
Ok(())
}
pub fn version() -> String {
DaemonCommand::command()
.get_version()
.unwrap_or("unknown")
.to_string()
}
}
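A hedged sketch of running the daemon through the relocated DaemonCommand CLI; the kratad binary name is assumed from DEV.md, and the values shown are simply the defaults declared above.

```sh
# Run the daemon with its declared defaults (equivalent to plain `kratad`)
kratad --listen unix:///var/lib/krata/daemon.socket --store /var/lib/krata

# Print the version string that DaemonCommand::version() also reports through identify-host
kratad --version
```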

View File

@ -1,6 +1,6 @@
use std::{collections::HashMap, sync::Arc};
use anyhow::Result;
use anyhow::{anyhow, Result};
use circular_buffer::CircularBuffer;
use kratart::channel::ChannelService;
use log::error;
@ -11,6 +11,9 @@ use tokio::{
},
task::JoinHandle,
};
use uuid::Uuid;
use crate::glt::GuestLookupTable;
const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
@ -21,6 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
#[derive(Clone)]
pub struct DaemonConsoleHandle {
glt: GuestLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
sender: Sender<(u32, Vec<u8>)>,
@ -50,9 +54,12 @@ impl DaemonConsoleAttachHandle {
impl DaemonConsoleHandle {
pub async fn attach(
&self,
domid: u32,
uuid: Uuid,
sender: Sender<Vec<u8>>,
) -> Result<DaemonConsoleAttachHandle> {
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
let buffers = self.buffers.lock().await;
let buffer = buffers.get(&domid).map(|x| x.to_vec()).unwrap_or_default();
drop(buffers);
@ -77,6 +84,7 @@ impl Drop for DaemonConsoleHandle {
}
pub struct DaemonConsole {
glt: GuestLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
receiver: Receiver<(u32, Option<Vec<u8>>)>,
@ -85,13 +93,14 @@ pub struct DaemonConsole {
}
impl DaemonConsole {
pub async fn new() -> Result<DaemonConsole> {
pub async fn new(glt: GuestLookupTable) -> Result<DaemonConsole> {
let (service, sender, receiver) =
ChannelService::new("krata-console".to_string(), Some(0)).await?;
let task = service.launch().await?;
let listeners = Arc::new(Mutex::new(HashMap::new()));
let buffers = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonConsole {
glt,
listeners,
buffers,
receiver,
@ -101,6 +110,7 @@ impl DaemonConsole {
}
pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
let glt = self.glt.clone();
let listeners = self.listeners.clone();
let buffers = self.buffers.clone();
let sender = self.sender.clone();
@ -110,6 +120,7 @@ impl DaemonConsole {
}
});
Ok(DaemonConsoleHandle {
glt,
listeners,
buffers,
sender,

View File

@ -1,15 +1,17 @@
use async_stream::try_stream;
use futures::Stream;
use krata::{
idm::protocol::{
idm_request::Request as IdmRequestType, idm_response::Response as IdmResponseType,
IdmMetricsRequest,
idm::internal::{
exec_stream_request_update::Update, request::Request as IdmRequestType,
response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
},
v1::{
common::{Guest, GuestOciImageFormat, GuestState, GuestStatus},
common::{Guest, GuestState, GuestStatus, OciImageFormat},
control::{
control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
ExecGuestReply, ExecGuestRequest, IdentifyHostReply, IdentifyHostRequest,
ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
@ -18,7 +20,7 @@ use krata::{
};
use krataoci::{
name::ImageName,
packer::{service::OciPackerService, OciImagePacked, OciPackedFormat},
packer::{service::OciPackerService, OciPackedFormat, OciPackedImage},
progress::{OciProgress, OciProgressContext},
};
use std::{pin::Pin, str::FromStr};
@ -32,7 +34,8 @@ use tonic::{Request, Response, Status, Streaming};
use uuid::Uuid;
use crate::{
console::DaemonConsoleHandle, db::GuestStore, event::DaemonEventContext, idm::DaemonIdmHandle,
command::DaemonCommand, console::DaemonConsoleHandle, db::GuestStore,
event::DaemonEventContext, glt::GuestLookupTable, idm::DaemonIdmHandle,
metrics::idm_metric_to_api, oci::convert_oci_progress,
};
@ -56,6 +59,7 @@ impl From<ApiError> for Status {
#[derive(Clone)]
pub struct DaemonControlService {
glt: GuestLookupTable,
events: DaemonEventContext,
console: DaemonConsoleHandle,
idm: DaemonIdmHandle,
@ -66,6 +70,7 @@ pub struct DaemonControlService {
impl DaemonControlService {
pub fn new(
glt: GuestLookupTable,
events: DaemonEventContext,
console: DaemonConsoleHandle,
idm: DaemonIdmHandle,
@ -74,6 +79,7 @@ impl DaemonControlService {
packer: OciPackerService,
) -> Self {
Self {
glt,
events,
console,
idm,
@ -90,12 +96,15 @@ enum ConsoleDataSelect {
}
enum PullImageSelect {
Progress(usize),
Completed(Result<Result<OciImagePacked, anyhow::Error>, JoinError>),
Progress(Option<OciProgress>),
Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
}
#[tonic::async_trait]
impl ControlService for DaemonControlService {
type ExecGuestStream =
Pin<Box<dyn Stream<Item = Result<ExecGuestReply, Status>> + Send + 'static>>;
type ConsoleDataStream =
Pin<Box<dyn Stream<Item = Result<ConsoleDataReply, Status>> + Send + 'static>>;
@ -108,6 +117,18 @@ impl ControlService for DaemonControlService {
type SnoopIdmStream =
Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;
async fn identify_host(
&self,
request: Request<IdentifyHostRequest>,
) -> Result<Response<IdentifyHostReply>, Status> {
let _ = request.into_inner();
Ok(Response::new(IdentifyHostReply {
host_domid: self.glt.host_domid(),
host_uuid: self.glt.host_uuid().to_string(),
krata_version: DaemonCommand::version(),
}))
}
async fn create_guest(
&self,
request: Request<CreateGuestRequest>,
@ -130,6 +151,7 @@ impl ControlService for DaemonControlService {
network: None,
exit_info: None,
error_info: None,
host: self.glt.host_uuid().to_string(),
domid: u32::MAX,
}),
spec: Some(spec),
@ -148,6 +170,98 @@ impl ControlService for DaemonControlService {
}))
}
async fn exec_guest(
&self,
request: Request<Streaming<ExecGuestRequest>>,
) -> Result<Response<Self::ExecGuestStream>, Status> {
let mut input = request.into_inner();
let Some(request) = input.next().await else {
return Err(ApiError {
message: "expected to have at least one request".to_string(),
}
.into());
};
let request = request?;
let Some(task) = request.task else {
return Err(ApiError {
message: "task is missing".to_string(),
}
.into());
};
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
message: error.to_string(),
})?;
let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
message: error.to_string(),
})?;
let idm_request = IdmRequest {
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
update: Some(Update::Start(ExecStreamRequestStart {
environment: task
.environment
.into_iter()
.map(|x| ExecEnvVar {
key: x.key,
value: x.value,
})
.collect(),
command: task.command,
working_directory: task.working_directory,
})),
})),
};
let output = try_stream! {
let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
message: x.to_string(),
})?;
loop {
select! {
x = input.next() => if let Some(update) = x {
let update: Result<ExecGuestRequest, Status> = update.map_err(|error| ApiError {
message: error.to_string()
}.into());
if let Ok(update) = update {
if !update.data.is_empty() {
let _ = handle.update(IdmRequest {
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
update: Some(Update::Stdin(ExecStreamRequestStdin {
data: update.data,
})),
}))}).await;
}
}
},
x = handle.receiver.recv() => match x {
Some(response) => {
let Some(IdmResponseType::ExecStream(update)) = response.response else {
break;
};
let reply = ExecGuestReply {
exited: update.exited,
error: update.error,
exit_code: update.exit_code,
stdout: update.stdout,
stderr: update.stderr
};
yield reply;
},
None => {
break;
}
}
};
}
};
Ok(Response::new(Box::pin(output) as Self::ExecGuestStream))
}
async fn destroy_guest(
&self,
request: Request<DestroyGuestRequest>,
@ -230,36 +344,10 @@ impl ControlService for DaemonControlService {
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
message: error.to_string(),
})?;
let guest = self
.guests
.read(uuid)
.await
.map_err(|error| ApiError {
message: error.to_string(),
})?
.ok_or_else(|| ApiError {
message: "guest did not exist in the database".to_string(),
})?;
let Some(ref state) = guest.state else {
return Err(ApiError {
message: "guest did not have state".to_string(),
}
.into());
};
let domid = state.domid;
if domid == 0 {
return Err(ApiError {
message: "invalid domid on the guest".to_string(),
}
.into());
}
let (sender, mut receiver) = channel(100);
let console = self
.console
.attach(domid, sender)
.attach(uuid, sender)
.await
.map_err(|error| ApiError {
message: format!("failed to attach to console: {}", error),
@ -309,45 +397,21 @@ impl ControlService for DaemonControlService {
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
message: error.to_string(),
})?;
let guest = self
.guests
.read(uuid)
.await
.map_err(|error| ApiError {
message: error.to_string(),
})?
.ok_or_else(|| ApiError {
message: "guest did not exist in the database".to_string(),
})?;
let Some(ref state) = guest.state else {
return Err(ApiError {
message: "guest did not have state".to_string(),
}
.into());
};
let domid = state.domid;
if domid == 0 {
return Err(ApiError {
message: "invalid domid on the guest".to_string(),
}
.into());
}
let client = self.idm.client(domid).await.map_err(|error| ApiError {
let client = self.idm.client(uuid).await.map_err(|error| ApiError {
message: error.to_string(),
})?;
let response = client
.send(IdmRequestType::Metrics(IdmMetricsRequest {}))
.send(IdmRequest {
request: Some(IdmRequestType::Metrics(MetricsRequest {})),
})
.await
.map_err(|error| ApiError {
message: error.to_string(),
})?;
let mut reply = ReadGuestMetricsReply::default();
if let IdmResponseType::Metrics(metrics) = response {
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
reply.root = metrics.root.map(idm_metric_to_api);
}
Ok(Response::new(reply))
@ -362,36 +426,39 @@ impl ControlService for DaemonControlService {
message: err.to_string(),
})?;
let format = match request.format() {
GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Unknown => OciPackedFormat::Squashfs,
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
OciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Tar => OciPackedFormat::Tar,
};
let (sender, mut receiver) = channel::<OciProgress>(100);
let context = OciProgressContext::new(sender);
let (context, mut receiver) = OciProgressContext::create();
let our_packer = self.packer.clone();
let output = try_stream! {
let mut task = tokio::task::spawn(async move {
our_packer.request(name, format, context).await
our_packer.request(name, format, request.overwrite_cache, context).await
});
let abort_handle = task.abort_handle();
let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
handle.abort();
});
loop {
let mut progresses = Vec::new();
let what = select! {
x = receiver.recv_many(&mut progresses, 10) => PullImageSelect::Progress(x),
x = receiver.changed() => match x {
Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
Err(_) => PullImageSelect::Progress(None),
},
x = &mut task => PullImageSelect::Completed(x),
};
match what {
PullImageSelect::Progress(count) => {
if count > 0 {
let progress = progresses.remove(progresses.len() - 1);
let reply = PullImageReply {
progress: Some(convert_oci_progress(progress)),
digest: String::new(),
format: GuestOciImageFormat::Unknown.into(),
};
yield reply;
}
PullImageSelect::Progress(Some(progress)) => {
let reply = PullImageReply {
progress: Some(convert_oci_progress(progress)),
digest: String::new(),
format: OciImageFormat::Unknown.into(),
};
yield reply;
},
PullImageSelect::Completed(result) => {
@ -405,13 +472,18 @@ impl ControlService for DaemonControlService {
progress: None,
digest: packed.digest,
format: match packed.format {
OciPackedFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
OciPackedFormat::Erofs => GuestOciImageFormat::Erofs.into(),
OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
OciPackedFormat::Tar => OciImageFormat::Tar.into(),
},
};
yield reply;
break;
},
_ => {
continue;
}
}
}
};
@ -438,9 +510,16 @@ impl ControlService for DaemonControlService {
) -> Result<Response<Self::SnoopIdmStream>, Status> {
let _ = request.into_inner();
let mut messages = self.idm.snoop();
let glt = self.glt.clone();
let output = try_stream! {
while let Ok(event) = messages.recv().await {
yield SnoopIdmReply { from: event.from, to: event.to, packet: Some(event.packet) };
let Some(from_uuid) = glt.lookup_uuid_by_domid(event.from).await else {
continue;
};
let Some(to_uuid) = glt.lookup_uuid_by_domid(event.to).await else {
continue;
};
yield SnoopIdmReply { from: from_uuid.to_string(), to: to_uuid.to_string(), packet: Some(event.packet) };
}
};
Ok(Response::new(Box::pin(output) as Self::SnoopIdmStream))

View File

@ -6,7 +6,7 @@ use std::{
use anyhow::Result;
use krata::{
idm::protocol::{idm_event::Event, IdmEvent},
idm::{internal::event::Event as EventType, internal::Event},
v1::common::{GuestExitInfo, GuestState, GuestStatus},
};
use log::{error, warn};
@ -50,8 +50,8 @@ pub struct DaemonEventGenerator {
feed: broadcast::Receiver<DaemonEvent>,
idm: DaemonIdmHandle,
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
idm_sender: Sender<(u32, IdmEvent)>,
idm_receiver: Receiver<(u32, IdmEvent)>,
idm_sender: Sender<(u32, Event)>,
idm_receiver: Receiver<(u32, Event)>,
_event_sender: broadcast::Sender<DaemonEvent>,
}
@ -93,7 +93,7 @@ impl DaemonEventGenerator {
match status {
GuestStatus::Started => {
if let Entry::Vacant(e) = self.idms.entry(domid) {
let client = self.idm.client(domid).await?;
let client = self.idm.client_by_domid(domid).await?;
let mut receiver = client.subscribe().await?;
let sender = self.idm_sender.clone();
let task = tokio::task::spawn(async move {
@ -122,9 +122,9 @@ impl DaemonEventGenerator {
Ok(())
}
async fn handle_idm_event(&mut self, id: Uuid, event: IdmEvent) -> Result<()> {
async fn handle_idm_event(&mut self, id: Uuid, event: Event) -> Result<()> {
match event.event {
Some(Event::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
Some(EventType::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
None => Ok(()),
}
}
@ -136,6 +136,7 @@ impl DaemonEventGenerator {
network: guest.state.clone().unwrap_or_default().network,
exit_info: Some(GuestExitInfo { code }),
error_info: None,
host: guest.state.clone().map(|x| x.host).unwrap_or_default(),
domid: guest.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
});

crates/daemon/src/glt.rs Normal file
View File

@ -0,0 +1,69 @@
use std::{collections::HashMap, sync::Arc};
use tokio::sync::RwLock;
use uuid::Uuid;
struct GuestLookupTableState {
domid_to_uuid: HashMap<u32, Uuid>,
uuid_to_domid: HashMap<Uuid, u32>,
}
impl GuestLookupTableState {
pub fn new(host_uuid: Uuid) -> Self {
let mut domid_to_uuid = HashMap::new();
let mut uuid_to_domid = HashMap::new();
domid_to_uuid.insert(0, host_uuid);
uuid_to_domid.insert(host_uuid, 0);
GuestLookupTableState {
domid_to_uuid,
uuid_to_domid,
}
}
}
#[derive(Clone)]
pub struct GuestLookupTable {
host_domid: u32,
host_uuid: Uuid,
state: Arc<RwLock<GuestLookupTableState>>,
}
impl GuestLookupTable {
pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
GuestLookupTable {
host_domid,
host_uuid,
state: Arc::new(RwLock::new(GuestLookupTableState::new(host_uuid))),
}
}
pub fn host_uuid(&self) -> Uuid {
self.host_uuid
}
pub fn host_domid(&self) -> u32 {
self.host_domid
}
pub async fn lookup_uuid_by_domid(&self, domid: u32) -> Option<Uuid> {
let state = self.state.read().await;
state.domid_to_uuid.get(&domid).cloned()
}
pub async fn lookup_domid_by_uuid(&self, uuid: &Uuid) -> Option<u32> {
let state = self.state.read().await;
state.uuid_to_domid.get(uuid).cloned()
}
pub async fn associate(&self, uuid: Uuid, domid: u32) {
let mut state = self.state.write().await;
state.uuid_to_domid.insert(uuid, domid);
state.domid_to_uuid.insert(domid, uuid);
}
pub async fn remove(&self, uuid: Uuid, domid: u32) {
let mut state = self.state.write().await;
state.uuid_to_domid.remove(&uuid);
state.domid_to_uuid.remove(&domid);
}
}
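The new GuestLookupTable is a bidirectional domid-to-UUID map, pre-seeded with the host entry at domid 0 so traffic to and from dom0 can always be attributed. A minimal usage sketch, assuming it runs inside the daemon crate; the guest UUID and the domid value 3 are illustrative only:

```rust
use uuid::Uuid;

use crate::glt::GuestLookupTable;

#[tokio::test]
async fn lookup_table_round_trip() {
    // Domain 0 is pre-registered as the host when the table is created.
    let host_uuid = Uuid::new_v4();
    let glt = GuestLookupTable::new(0, host_uuid);
    assert_eq!(glt.lookup_uuid_by_domid(0).await, Some(host_uuid));

    // associate() records a guest once its domid is known (illustrative domid 3).
    let guest_uuid = Uuid::new_v4();
    glt.associate(guest_uuid, 3).await;
    assert_eq!(glt.lookup_uuid_by_domid(3).await, Some(guest_uuid));
    assert_eq!(glt.lookup_domid_by_uuid(&guest_uuid).await, Some(3));

    // remove() drops both directions of the mapping when the guest goes away.
    glt.remove(guest_uuid, 3).await;
    assert_eq!(glt.lookup_uuid_by_domid(3).await, None);
}
```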

View File

@ -6,8 +6,9 @@ use std::{
use anyhow::{anyhow, Result};
use bytes::{Buf, BytesMut};
use krata::idm::{
client::{IdmBackend, IdmClient},
protocol::IdmPacket,
client::{IdmBackend, IdmInternalClient},
internal::INTERNAL_IDM_CHANNEL,
transport::IdmTransportPacket,
};
use kratart::channel::ChannelService;
use log::{error, warn};
@ -21,15 +22,19 @@ use tokio::{
},
task::JoinHandle,
};
use uuid::Uuid;
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmPacket>>>>;
type ClientMap = Arc<Mutex<HashMap<u32, IdmClient>>>;
use crate::glt::GuestLookupTable;
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
#[derive(Clone)]
pub struct DaemonIdmHandle {
glt: GuestLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmPacket)>,
tx_sender: Sender<(u32, IdmTransportPacket)>,
task: Arc<JoinHandle<()>>,
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
}
@ -39,7 +44,14 @@ impl DaemonIdmHandle {
self.snoop_sender.subscribe()
}
pub async fn client(&self, domid: u32) -> Result<IdmClient> {
pub async fn client(&self, uuid: Uuid) -> Result<IdmInternalClient> {
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
self.client_by_domid(domid).await
}
pub async fn client_by_domid(&self, domid: u32) -> Result<IdmInternalClient> {
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await
}
}
@ -56,22 +68,23 @@ impl Drop for DaemonIdmHandle {
pub struct DaemonIdmSnoopPacket {
pub from: u32,
pub to: u32,
pub packet: IdmPacket,
pub packet: IdmTransportPacket,
}
pub struct DaemonIdm {
glt: GuestLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmPacket)>,
tx_sender: Sender<(u32, IdmTransportPacket)>,
tx_raw_sender: Sender<(u32, Vec<u8>)>,
tx_receiver: Receiver<(u32, IdmPacket)>,
tx_receiver: Receiver<(u32, IdmTransportPacket)>,
rx_receiver: Receiver<(u32, Option<Vec<u8>>)>,
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
task: JoinHandle<()>,
}
impl DaemonIdm {
pub async fn new() -> Result<DaemonIdm> {
pub async fn new(glt: GuestLookupTable) -> Result<DaemonIdm> {
let (service, tx_raw_sender, rx_receiver) =
ChannelService::new("krata-channel".to_string(), None).await?;
let (tx_sender, tx_receiver) = channel(100);
@ -80,6 +93,7 @@ impl DaemonIdm {
let clients = Arc::new(Mutex::new(HashMap::new()));
let feeds = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonIdm {
glt,
rx_receiver,
tx_receiver,
tx_sender,
@ -92,6 +106,7 @@ impl DaemonIdm {
}
pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
let glt = self.glt.clone();
let clients = self.clients.clone();
let feeds = self.feeds.clone();
let tx_sender = self.tx_sender.clone();
@ -104,6 +119,7 @@ impl DaemonIdm {
}
});
Ok(DaemonIdmHandle {
glt,
clients,
feeds,
tx_sender,
@ -136,7 +152,7 @@ impl DaemonIdm {
}
let mut packet = buffer.split_to(needed);
packet.advance(6);
match IdmPacket::decode(packet) {
match IdmTransportPacket::decode(packet) {
Ok(packet) => {
let _ = client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await?;
let guard = self.feeds.lock().await;
@ -196,10 +212,10 @@ impl Drop for DaemonIdm {
async fn client_or_create(
domid: u32,
tx_sender: &Sender<(u32, IdmPacket)>,
tx_sender: &Sender<(u32, IdmTransportPacket)>,
clients: &ClientMap,
feeds: &BackendFeedMap,
) -> Result<IdmClient> {
) -> Result<IdmInternalClient> {
let mut clients = clients.lock().await;
let mut feeds = feeds.lock().await;
match clients.entry(domid) {
@ -212,7 +228,11 @@ async fn client_or_create(
rx_receiver,
tx_sender: tx_sender.clone(),
};
let client = IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await?;
let client = IdmInternalClient::new(
INTERNAL_IDM_CHANNEL,
Box::new(backend) as Box<dyn IdmBackend>,
)
.await?;
entry.insert(client.clone());
Ok(client)
}
@ -221,13 +241,13 @@ async fn client_or_create(
pub struct IdmDaemonBackend {
domid: u32,
rx_receiver: Receiver<IdmPacket>,
tx_sender: Sender<(u32, IdmPacket)>,
rx_receiver: Receiver<IdmTransportPacket>,
tx_sender: Sender<(u32, IdmTransportPacket)>,
}
#[async_trait::async_trait]
impl IdmBackend for IdmDaemonBackend {
async fn recv(&mut self) -> Result<IdmPacket> {
async fn recv(&mut self) -> Result<IdmTransportPacket> {
if let Some(packet) = self.rx_receiver.recv().await {
Ok(packet)
} else {
@ -235,7 +255,7 @@ impl IdmBackend for IdmDaemonBackend {
}
}
async fn send(&mut self, packet: IdmPacket) -> Result<()> {
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
self.tx_sender.send((self.domid, packet)).await?;
Ok(())
}

View File

@ -1,10 +1,11 @@
use std::{net::SocketAddr, path::PathBuf, str::FromStr};
use anyhow::Result;
use anyhow::{anyhow, Result};
use console::{DaemonConsole, DaemonConsoleHandle};
use control::DaemonControlService;
use db::GuestStore;
use event::{DaemonEventContext, DaemonEventGenerator};
use glt::GuestLookupTable;
use idm::{DaemonIdm, DaemonIdmHandle};
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
@ -21,10 +22,12 @@ use tokio_stream::wrappers::UnixListenerStream;
use tonic::transport::{Identity, Server, ServerTlsConfig};
use uuid::Uuid;
pub mod command;
pub mod console;
pub mod control;
pub mod db;
pub mod event;
pub mod glt;
pub mod idm;
pub mod metrics;
pub mod oci;
@ -32,6 +35,7 @@ pub mod reconcile;
pub struct Daemon {
store: String,
glt: GuestLookupTable,
guests: GuestStore,
events: DaemonEventContext,
guest_reconciler_task: JoinHandle<()>,
@ -51,27 +55,52 @@ impl Daemon {
image_cache_dir.push("image");
fs::create_dir_all(&image_cache_dir).await?;
let packer = OciPackerService::new(None, &image_cache_dir, OciPlatform::current())?;
let mut host_uuid_path = PathBuf::from(store.clone());
host_uuid_path.push("host.uuid");
let host_uuid = if host_uuid_path.is_file() {
let content = fs::read_to_string(&host_uuid_path).await?;
Uuid::from_str(content.trim()).ok()
} else {
None
};
let runtime = Runtime::new(store.clone()).await?;
let host_uuid = if let Some(host_uuid) = host_uuid {
host_uuid
} else {
let generated = Uuid::new_v4();
let mut string = generated.to_string();
string.push('\n');
fs::write(&host_uuid_path, string).await?;
generated
};
let initrd_path = detect_guest_file(&store, "initrd")?;
let kernel_path = detect_guest_file(&store, "kernel")?;
let packer = OciPackerService::new(None, &image_cache_dir, OciPlatform::current()).await?;
let runtime = Runtime::new().await?;
let glt = GuestLookupTable::new(0, host_uuid);
let guests_db_path = format!("{}/guests.db", store);
let guests = GuestStore::open(&PathBuf::from(guests_db_path))?;
let (guest_reconciler_notify, guest_reconciler_receiver) =
channel::<Uuid>(GUEST_RECONCILER_QUEUE_LEN);
let idm = DaemonIdm::new().await?;
let idm = DaemonIdm::new(glt.clone()).await?;
let idm = idm.launch().await?;
let console = DaemonConsole::new().await?;
let console = DaemonConsole::new(glt.clone()).await?;
let console = console.launch().await?;
let (events, generator) =
DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
.await?;
let runtime_for_reconciler = runtime.dupe().await?;
let guest_reconciler = GuestReconciler::new(
glt.clone(),
guests.clone(),
events.clone(),
runtime_for_reconciler,
packer.clone(),
guest_reconciler_notify.clone(),
kernel_path,
initrd_path,
)?;
let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
@ -79,6 +108,7 @@ impl Daemon {
Ok(Self {
store,
glt,
guests,
events,
guest_reconciler_task,
@ -92,6 +122,7 @@ impl Daemon {
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
let control_service = DaemonControlService::new(
self.glt.clone(),
self.events.clone(),
self.console.clone(),
self.idm.clone(),
@ -154,3 +185,16 @@ impl Drop for Daemon {
self.generator_task.abort();
}
}
fn detect_guest_file(store: &str, name: &str) -> Result<PathBuf> {
let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
if path.is_file() {
return Ok(path);
}
path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
if path.is_file() {
return Ok(path);
}
Err(anyhow!("unable to find required guest file: {}", name))
}

View File

@ -1,18 +1,18 @@
use krata::{
idm::protocol::{IdmMetricFormat, IdmMetricNode},
idm::internal::{MetricFormat, MetricNode},
v1::common::{GuestMetricFormat, GuestMetricNode},
};
fn idm_metric_format_to_api(format: IdmMetricFormat) -> GuestMetricFormat {
fn idm_metric_format_to_api(format: MetricFormat) -> GuestMetricFormat {
match format {
IdmMetricFormat::Unknown => GuestMetricFormat::Unknown,
IdmMetricFormat::Bytes => GuestMetricFormat::Bytes,
IdmMetricFormat::Integer => GuestMetricFormat::Integer,
IdmMetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
MetricFormat::Unknown => GuestMetricFormat::Unknown,
MetricFormat::Bytes => GuestMetricFormat::Bytes,
MetricFormat::Integer => GuestMetricFormat::Integer,
MetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
}
}
pub fn idm_metric_to_api(node: IdmMetricNode) -> GuestMetricNode {
pub fn idm_metric_to_api(node: MetricNode) -> GuestMetricNode {
let format = node.format();
GuestMetricNode {
name: node.name,

View File

@ -1,33 +1,72 @@
use krata::v1::control::{
PullImageProgress, PullImageProgressLayer, PullImageProgressLayerPhase, PullImageProgressPhase,
image_progress_indication::Indication, ImageProgress, ImageProgressIndication,
ImageProgressIndicationBar, ImageProgressIndicationCompleted, ImageProgressIndicationHidden,
ImageProgressIndicationSpinner, ImageProgressLayer, ImageProgressLayerPhase,
ImageProgressPhase,
};
use krataoci::progress::{
OciProgress, OciProgressIndication, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase,
};
use krataoci::progress::{OciProgress, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase};
fn convert_oci_layer_progress(layer: OciProgressLayer) -> PullImageProgressLayer {
PullImageProgressLayer {
id: layer.id,
phase: match layer.phase {
OciProgressLayerPhase::Waiting => PullImageProgressLayerPhase::Waiting,
OciProgressLayerPhase::Downloading => PullImageProgressLayerPhase::Downloading,
OciProgressLayerPhase::Downloaded => PullImageProgressLayerPhase::Downloaded,
OciProgressLayerPhase::Extracting => PullImageProgressLayerPhase::Extracting,
OciProgressLayerPhase::Extracted => PullImageProgressLayerPhase::Extracted,
}
.into(),
value: layer.value,
total: layer.total,
fn convert_oci_progress_indication(indication: OciProgressIndication) -> ImageProgressIndication {
ImageProgressIndication {
indication: Some(match indication {
OciProgressIndication::Hidden => Indication::Hidden(ImageProgressIndicationHidden {}),
OciProgressIndication::ProgressBar {
message,
current,
total,
bytes,
} => Indication::Bar(ImageProgressIndicationBar {
message: message.unwrap_or_default(),
current,
total,
is_bytes: bytes,
}),
OciProgressIndication::Spinner { message } => {
Indication::Spinner(ImageProgressIndicationSpinner {
message: message.unwrap_or_default(),
})
}
OciProgressIndication::Completed {
message,
total,
bytes,
} => Indication::Completed(ImageProgressIndicationCompleted {
message: message.unwrap_or_default(),
total: total.unwrap_or(0),
is_bytes: bytes,
}),
}),
}
}
pub fn convert_oci_progress(oci: OciProgress) -> PullImageProgress {
PullImageProgress {
fn convert_oci_layer_progress(layer: OciProgressLayer) -> ImageProgressLayer {
ImageProgressLayer {
id: layer.id,
phase: match layer.phase {
OciProgressLayerPhase::Waiting => ImageProgressLayerPhase::Waiting,
OciProgressLayerPhase::Downloading => ImageProgressLayerPhase::Downloading,
OciProgressLayerPhase::Downloaded => ImageProgressLayerPhase::Downloaded,
OciProgressLayerPhase::Extracting => ImageProgressLayerPhase::Extracting,
OciProgressLayerPhase::Extracted => ImageProgressLayerPhase::Extracted,
}
.into(),
indication: Some(convert_oci_progress_indication(layer.indication)),
}
}
pub fn convert_oci_progress(oci: OciProgress) -> ImageProgress {
ImageProgress {
phase: match oci.phase {
OciProgressPhase::Resolving => PullImageProgressPhase::Resolving,
OciProgressPhase::Resolved => PullImageProgressPhase::Resolved,
OciProgressPhase::ConfigAcquire => PullImageProgressPhase::ConfigAcquire,
OciProgressPhase::LayerAcquire => PullImageProgressPhase::LayerAcquire,
OciProgressPhase::Packing => PullImageProgressPhase::Packing,
OciProgressPhase::Complete => PullImageProgressPhase::Complete,
OciProgressPhase::Started => ImageProgressPhase::Started,
OciProgressPhase::Resolving => ImageProgressPhase::Resolving,
OciProgressPhase::Resolved => ImageProgressPhase::Resolved,
OciProgressPhase::ConfigDownload => ImageProgressPhase::ConfigDownload,
OciProgressPhase::LayerDownload => ImageProgressPhase::LayerDownload,
OciProgressPhase::Assemble => ImageProgressPhase::Assemble,
OciProgressPhase::Pack => ImageProgressPhase::Pack,
OciProgressPhase::Complete => ImageProgressPhase::Complete,
}
.into(),
layers: oci
@ -35,7 +74,6 @@ pub fn convert_oci_progress(oci: OciProgress) -> PullImageProgress {
.into_values()
.map(convert_oci_layer_progress)
.collect::<Vec<_>>(),
value: oci.value,
total: oci.total,
indication: Some(convert_oci_progress_indication(oci.indication)),
}
}

View File

@ -1,20 +1,17 @@
use std::{
collections::{hash_map::Entry, HashMap},
path::PathBuf,
sync::Arc,
time::Duration,
};
use anyhow::{anyhow, Result};
use krata::launchcfg::LaunchPackedFormat;
use anyhow::Result;
use krata::v1::{
common::{
guest_image_spec::Image, Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState,
GuestOciImageFormat, GuestState, GuestStatus,
},
common::{Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState, GuestState, GuestStatus},
control::GuestChangedEvent,
};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::{launch::GuestLaunchRequest, GuestInfo, Runtime};
use krataoci::packer::service::OciPackerService;
use kratart::{GuestInfo, Runtime};
use log::{error, info, trace, warn};
use tokio::{
select,
@ -30,8 +27,13 @@ use uuid::Uuid;
use crate::{
db::GuestStore,
event::{DaemonEvent, DaemonEventContext},
glt::GuestLookupTable,
};
use self::start::GuestStarter;
mod start;
const PARALLEL_LIMIT: u32 = 5;
#[derive(Debug)]
@ -53,28 +55,38 @@ impl Drop for GuestReconcilerEntry {
#[derive(Clone)]
pub struct GuestReconciler {
glt: GuestLookupTable,
guests: GuestStore,
events: DaemonEventContext,
runtime: Runtime,
packer: OciPackerService,
kernel_path: PathBuf,
initrd_path: PathBuf,
tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
guest_reconciler_notify: Sender<Uuid>,
reconcile_lock: Arc<RwLock<()>>,
}
impl GuestReconciler {
#[allow(clippy::too_many_arguments)]
pub fn new(
glt: GuestLookupTable,
guests: GuestStore,
events: DaemonEventContext,
runtime: Runtime,
packer: OciPackerService,
guest_reconciler_notify: Sender<Uuid>,
kernel_path: PathBuf,
initrd_path: PathBuf,
) -> Result<Self> {
Ok(Self {
glt,
guests,
events,
runtime,
packer,
kernel_path,
initrd_path,
tasks: Arc::new(Mutex::new(HashMap::new())),
guest_reconciler_notify,
reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
@ -123,6 +135,23 @@ impl GuestReconciler {
trace!("reconciling runtime");
let runtime_guests = self.runtime.list().await?;
let stored_guests = self.guests.list().await?;
let non_existent_guests = runtime_guests
.iter()
.filter(|x| !stored_guests.iter().any(|g| *g.0 == x.uuid))
.collect::<Vec<_>>();
for guest in non_existent_guests {
warn!("destroying unknown runtime guest {}", guest.uuid);
if let Err(error) = self.runtime.destroy(guest.uuid).await {
error!(
"failed to destroy unknown runtime guest {}: {}",
guest.uuid, error
);
}
self.guests.remove(guest.uuid).await?;
}
for (uuid, mut stored_guest) in stored_guests {
let previous_guest = stored_guest.clone();
let runtime_guest = runtime_guests.iter().find(|x| x.uuid == uuid);
@ -136,6 +165,7 @@ impl GuestReconciler {
}
Some(runtime) => {
self.glt.associate(uuid, runtime.domid).await;
let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
if let Some(code) = runtime.state.exit_code {
state.status = GuestStatus::Exited.into();
@ -224,71 +254,14 @@ impl GuestReconciler {
}
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
let Some(ref spec) = guest.spec else {
return Err(anyhow!("guest spec not specified"));
let starter = GuestStarter {
kernel_path: &self.kernel_path,
initrd_path: &self.initrd_path,
packer: &self.packer,
glt: &self.glt,
runtime: &self.runtime,
};
let Some(ref image) = spec.image else {
return Err(anyhow!("image spec not provided"));
};
let oci = match image.image {
Some(Image::Oci(ref oci)) => oci,
None => {
return Err(anyhow!("oci spec not specified"));
}
};
let task = spec.task.as_ref().cloned().unwrap_or_default();
let image = self
.packer
.recall(
&oci.digest,
match oci.format() {
GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
},
)
.await?;
let Some(image) = image else {
return Err(anyhow!(
"image {} in the requested format did not exist",
oci.digest
));
};
let info = self
.runtime
.launch(GuestLaunchRequest {
format: LaunchPackedFormat::Squashfs,
uuid: Some(uuid),
name: if spec.name.is_empty() {
None
} else {
Some(spec.name.clone())
},
image,
vcpus: spec.vcpus,
mem: spec.mem,
env: task
.environment
.iter()
.map(|x| (x.key.clone(), x.value.clone()))
.collect::<HashMap<_, _>>(),
run: empty_vec_optional(task.command.clone()),
debug: false,
})
.await?;
info!("started guest {}", uuid);
guest.state = Some(GuestState {
status: GuestStatus::Started.into(),
network: Some(guestinfo_to_networkstate(&info)),
exit_info: None,
error_info: None,
domid: info.domid,
});
Ok(GuestReconcilerResult::Changed { rerun: false })
starter.start(uuid, guest).await
}
async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
@ -305,13 +278,20 @@ impl GuestReconciler {
trace!("failed to destroy runtime guest {}: {}", uuid, error);
}
let domid = guest.state.as_ref().map(|x| x.domid);
if let Some(domid) = domid {
self.glt.remove(uuid, domid).await;
}
info!("destroyed guest {}", uuid);
guest.state = Some(GuestState {
status: GuestStatus::Destroyed.into(),
network: None,
exit_info: None,
error_info: None,
domid: guest.state.as_ref().map(|x| x.domid).unwrap_or(u32::MAX),
host: self.glt.host_uuid().to_string(),
domid: domid.unwrap_or(u32::MAX),
});
Ok(GuestReconcilerResult::Changed { rerun: false })
}
@ -356,15 +336,7 @@ impl GuestReconciler {
}
}
fn empty_vec_optional<T>(value: Vec<T>) -> Option<Vec<T>> {
if value.is_empty() {
None
} else {
Some(value)
}
}
fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
pub fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
GuestNetworkState {
guest_ipv4: info.guest_ipv4.map(|x| x.to_string()).unwrap_or_default(),
guest_ipv6: info.guest_ipv6.map(|x| x.to_string()).unwrap_or_default(),

View File

@ -0,0 +1,182 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use anyhow::{anyhow, Result};
use futures::StreamExt;
use krata::launchcfg::LaunchPackedFormat;
use krata::v1::common::GuestOciImageSpec;
use krata::v1::common::{guest_image_spec::Image, Guest, GuestState, GuestStatus, OciImageFormat};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::{launch::GuestLaunchRequest, Runtime};
use log::info;
use tokio::fs::{self, File};
use tokio::io::AsyncReadExt;
use tokio_tar::Archive;
use uuid::Uuid;
use crate::{
glt::GuestLookupTable,
reconcile::guest::{guestinfo_to_networkstate, GuestReconcilerResult},
};
// if a kernel is >= 100MB, that's kinda scary.
const OCI_SPEC_TAR_FILE_MAX_SIZE: usize = 100 * 1024 * 1024;
pub struct GuestStarter<'a> {
pub kernel_path: &'a Path,
pub initrd_path: &'a Path,
pub packer: &'a OciPackerService,
pub glt: &'a GuestLookupTable,
pub runtime: &'a Runtime,
}
impl GuestStarter<'_> {
pub async fn oci_spec_tar_read_file(
&self,
file: &Path,
oci: &GuestOciImageSpec,
) -> Result<Vec<u8>> {
if oci.format() != OciImageFormat::Tar {
return Err(anyhow!(
"oci image spec for {} is required to be in tar format",
oci.digest
));
}
let image = self
.packer
.recall(&oci.digest, OciPackedFormat::Tar)
.await?;
let Some(image) = image else {
return Err(anyhow!("image {} was not found in tar format", oci.digest));
};
let mut archive = Archive::new(File::open(&image.path).await?);
let mut entries = archive.entries()?;
while let Some(entry) = entries.next().await {
let mut entry = entry?;
let path = entry.path()?;
if entry.header().size()? as usize > OCI_SPEC_TAR_FILE_MAX_SIZE {
return Err(anyhow!(
"file {} in image {} is larger than the size limit",
file.to_string_lossy(),
oci.digest
));
}
if path == file {
let mut buffer = Vec::new();
entry.read_to_end(&mut buffer).await?;
return Ok(buffer);
}
}
Err(anyhow!(
"unable to find file {} in image {}",
file.to_string_lossy(),
oci.digest
))
}
pub async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
let Some(ref spec) = guest.spec else {
return Err(anyhow!("guest spec not specified"));
};
let Some(ref image) = spec.image else {
return Err(anyhow!("image spec not provided"));
};
let oci = match image.image {
Some(Image::Oci(ref oci)) => oci,
None => {
return Err(anyhow!("oci spec not specified"));
}
};
let task = spec.task.as_ref().cloned().unwrap_or_default();
let image = self
.packer
.recall(
&oci.digest,
match oci.format() {
OciImageFormat::Unknown => OciPackedFormat::Squashfs,
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
OciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Tar => {
return Err(anyhow!("tar image format is not supported for guests"));
}
},
)
.await?;
let Some(image) = image else {
return Err(anyhow!(
"image {} in the requested format did not exist",
oci.digest
));
};
let kernel = if let Some(ref spec) = spec.kernel {
let Some(Image::Oci(ref oci)) = spec.image else {
return Err(anyhow!("kernel image spec must be an oci image"));
};
self.oci_spec_tar_read_file(&PathBuf::from("kernel/image"), oci)
.await?
} else {
fs::read(&self.kernel_path).await?
};
let initrd = if let Some(ref spec) = spec.initrd {
let Some(Image::Oci(ref oci)) = spec.image else {
return Err(anyhow!("initrd image spec must be an oci image"));
};
self.oci_spec_tar_read_file(&PathBuf::from("krata/initrd"), oci)
.await?
} else {
fs::read(&self.initrd_path).await?
};
let info = self
.runtime
.launch(GuestLaunchRequest {
format: LaunchPackedFormat::Squashfs,
uuid: Some(uuid),
name: if spec.name.is_empty() {
None
} else {
Some(spec.name.clone())
},
image,
kernel,
initrd,
vcpus: spec.vcpus,
mem: spec.mem,
env: task
.environment
.iter()
.map(|x| (x.key.clone(), x.value.clone()))
.collect::<HashMap<_, _>>(),
run: empty_vec_optional(task.command.clone()),
debug: false,
})
.await?;
self.glt.associate(uuid, info.domid).await;
info!("started guest {}", uuid);
guest.state = Some(GuestState {
status: GuestStatus::Started.into(),
network: Some(guestinfo_to_networkstate(&info)),
exit_info: None,
error_info: None,
host: self.glt.host_uuid().to_string(),
domid: info.domid,
});
Ok(GuestReconcilerResult::Changed { rerun: false })
}
}
fn empty_vec_optional<T>(value: Vec<T>) -> Option<Vec<T>> {
if value.is_empty() {
None
} else {
Some(value)
}
}

View File

@ -14,8 +14,8 @@ cgroups-rs = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.10" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.10" }
libc = { workspace = true }
log = { workspace = true }
nix = { workspace = true, features = ["ioctl", "process", "fs"] }

View File

@ -1,15 +1,17 @@
use crate::{
childwait::{ChildEvent, ChildWait},
death,
exec::GuestExecTask,
metrics::MetricsCollector,
};
use anyhow::Result;
use cgroups_rs::Cgroup;
use krata::idm::{
client::IdmClient,
protocol::{
idm_event::Event, idm_request::Request, idm_response::Response, IdmEvent, IdmExitEvent,
IdmMetricsResponse, IdmPingResponse, IdmRequest,
client::{IdmClientStreamResponseHandle, IdmInternalClient},
internal::{
event::Event as EventType, request::Request as RequestType,
response::Response as ResponseType, Event, ExecStreamResponseUpdate, ExitEvent,
MetricsResponse, PingResponse, Request, Response,
},
};
use log::debug;
@ -17,14 +19,18 @@ use nix::unistd::Pid;
use tokio::{select, sync::broadcast};
pub struct GuestBackground {
idm: IdmClient,
idm: IdmInternalClient,
child: Pid,
_cgroup: Cgroup,
wait: ChildWait,
}
impl GuestBackground {
pub async fn new(idm: IdmClient, cgroup: Cgroup, child: Pid) -> Result<GuestBackground> {
pub async fn new(
idm: IdmInternalClient,
cgroup: Cgroup,
child: Pid,
) -> Result<GuestBackground> {
Ok(GuestBackground {
idm,
child,
@ -36,11 +42,11 @@ impl GuestBackground {
pub async fn run(&mut self) -> Result<()> {
let mut event_subscription = self.idm.subscribe().await?;
let mut requests_subscription = self.idm.requests().await?;
let mut request_streams_subscription = self.idm.request_streams().await?;
loop {
select! {
x = event_subscription.recv() => match x {
Ok(_event) => {
},
Err(broadcast::error::RecvError::Closed) => {
@ -54,8 +60,23 @@ impl GuestBackground {
},
x = requests_subscription.recv() => match x {
Ok(request) => {
self.handle_idm_request(request).await?;
Ok((id, request)) => {
self.handle_idm_request(id, request).await?;
},
Err(broadcast::error::RecvError::Closed) => {
debug!("idm packet channel closed");
break;
},
_ => {
continue;
}
},
x = request_streams_subscription.recv() => match x {
Ok(handle) => {
self.handle_idm_stream_request(handle).await?;
},
Err(broadcast::error::RecvError::Closed) => {
@ -79,25 +100,56 @@ impl GuestBackground {
Ok(())
}
async fn handle_idm_request(&mut self, packet: IdmRequest) -> Result<()> {
let id = packet.id;
async fn handle_idm_request(&mut self, id: u64, packet: Request) -> Result<()> {
match packet.request {
Some(Request::Ping(_)) => {
Some(RequestType::Ping(_)) => {
self.idm
.respond(id, Response::Ping(IdmPingResponse {}))
.respond(
id,
Response {
response: Some(ResponseType::Ping(PingResponse {})),
},
)
.await?;
}
Some(Request::Metrics(_)) => {
Some(RequestType::Metrics(_)) => {
let metrics = MetricsCollector::new()?;
let root = metrics.collect()?;
let response = IdmMetricsResponse { root: Some(root) };
let response = Response {
response: Some(ResponseType::Metrics(MetricsResponse { root: Some(root) })),
};
self.idm.respond(id, Response::Metrics(response)).await?;
self.idm.respond(id, response).await?;
}
None => {}
_ => {}
}
Ok(())
}
async fn handle_idm_stream_request(
&mut self,
handle: IdmClientStreamResponseHandle<Request>,
) -> Result<()> {
if let Some(RequestType::ExecStream(_)) = &handle.initial.request {
tokio::task::spawn(async move {
let exec = GuestExecTask { handle };
if let Err(error) = exec.run().await {
let _ = exec
.handle
.respond(Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: true,
error: error.to_string(),
exit_code: -1,
stdout: vec![],
stderr: vec![],
})),
})
.await;
}
});
}
Ok(())
}
@ -105,8 +157,8 @@ impl GuestBackground {
async fn child_event(&mut self, event: ChildEvent) -> Result<()> {
if event.pid == self.child {
self.idm
.emit(IdmEvent {
event: Some(Event::Exit(IdmExitEvent { code: event.status })),
.emit(Event {
event: Some(EventType::Exit(ExitEvent { code: event.status })),
})
.await?;
death(event.status).await?;

crates/guest/src/exec.rs Normal file
View File

@ -0,0 +1,172 @@
use std::{collections::HashMap, process::Stdio};
use anyhow::{anyhow, Result};
use krata::idm::{
client::IdmClientStreamResponseHandle,
internal::{
exec_stream_request_update::Update, request::Request as RequestType,
ExecStreamResponseUpdate,
},
internal::{response::Response as ResponseType, Request, Response},
};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
join,
process::Command,
};
pub struct GuestExecTask {
pub handle: IdmClientStreamResponseHandle<Request>,
}
impl GuestExecTask {
pub async fn run(&self) -> Result<()> {
let mut receiver = self.handle.take().await?;
let Some(ref request) = self.handle.initial.request else {
return Err(anyhow!("request was empty"));
};
let RequestType::ExecStream(update) = request else {
return Err(anyhow!("request was not an exec update"));
};
let Some(Update::Start(ref start)) = update.update else {
return Err(anyhow!("first request did not contain a start update"));
};
let mut cmd = start.command.clone();
if cmd.is_empty() {
return Err(anyhow!("command line was empty"));
}
let exe = cmd.remove(0);
let mut env = HashMap::new();
for entry in &start.environment {
env.insert(entry.key.clone(), entry.value.clone());
}
if !env.contains_key("PATH") {
env.insert(
"PATH".to_string(),
"/bin:/usr/bin:/usr/local/bin".to_string(),
);
}
let dir = if start.working_directory.is_empty() {
"/".to_string()
} else {
start.working_directory.clone()
};
let mut child = Command::new(exe)
.args(cmd)
.envs(env)
.current_dir(dir)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.kill_on_drop(true)
.spawn()
.map_err(|error| anyhow!("failed to spawn: {}", error))?;
let mut stdin = child
.stdin
.take()
.ok_or_else(|| anyhow!("stdin was missing"))?;
let mut stdout = child
.stdout
.take()
.ok_or_else(|| anyhow!("stdout was missing"))?;
let mut stderr = child
.stderr
.take()
.ok_or_else(|| anyhow!("stderr was missing"))?;
let stdout_handle = self.handle.clone();
let stdout_task = tokio::task::spawn(async move {
let mut stdout_buffer = vec![0u8; 8 * 1024];
loop {
let Ok(size) = stdout.read(&mut stdout_buffer).await else {
break;
};
if size > 0 {
let response = Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: false,
exit_code: 0,
error: String::new(),
stdout: stdout_buffer[0..size].to_vec(),
stderr: vec![],
})),
};
let _ = stdout_handle.respond(response).await;
} else {
break;
}
}
});
let stderr_handle = self.handle.clone();
let stderr_task = tokio::task::spawn(async move {
let mut stderr_buffer = vec![0u8; 8 * 1024];
loop {
let Ok(size) = stderr.read(&mut stderr_buffer).await else {
break;
};
if size > 0 {
let response = Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: false,
exit_code: 0,
error: String::new(),
stdout: vec![],
stderr: stderr_buffer[0..size].to_vec(),
})),
};
let _ = stderr_handle.respond(response).await;
} else {
break;
}
}
});
let stdin_task = tokio::task::spawn(async move {
loop {
let Some(request) = receiver.recv().await else {
break;
};
let Some(RequestType::ExecStream(update)) = request.request else {
continue;
};
let Some(Update::Stdin(update)) = update.update else {
continue;
};
if stdin.write_all(&update.data).await.is_err() {
break;
}
}
});
let exit = child.wait().await?;
let code = exit.code().unwrap_or(-1);
let _ = join!(stdout_task, stderr_task);
stdin_task.abort();
let response = Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: true,
exit_code: code,
error: String::new(),
stdout: vec![],
stderr: vec![],
})),
};
self.handle.respond(response).await?;
Ok(())
}
}

View File

@ -3,7 +3,8 @@ use cgroups_rs::{Cgroup, CgroupPid};
use futures::stream::TryStreamExt;
use ipnetwork::IpNetwork;
use krata::ethtool::EthtoolHandle;
use krata::idm::client::IdmClient;
use krata::idm::client::IdmInternalClient;
use krata::idm::internal::INTERNAL_IDM_CHANNEL;
use krata::launchcfg::{LaunchInfo, LaunchNetwork, LaunchPackedFormat};
use libc::{sethostname, setsid, TIOCSCTTY};
use log::{trace, warn};
@ -77,7 +78,7 @@ impl GuestInit {
Err(error) => warn!("failed to open console: {}", error),
};
let idm = IdmClient::open("/dev/hvc1")
let idm = IdmInternalClient::open(INTERNAL_IDM_CHANNEL, "/dev/hvc1")
.await
.map_err(|x| anyhow!("failed to open idm client: {}", x))?;
self.mount_config_image().await?;
@ -438,7 +439,12 @@ impl GuestInit {
Ok(())
}
async fn run(&mut self, config: &Config, launch: &LaunchInfo, idm: IdmClient) -> Result<()> {
async fn run(
&mut self,
config: &Config,
launch: &LaunchInfo,
idm: IdmInternalClient,
) -> Result<()> {
let mut cmd = match config.cmd() {
None => vec![],
Some(value) => value.clone(),
@ -473,7 +479,7 @@ impl GuestInit {
env.insert("TERM".to_string(), "xterm".to_string());
}
let path = GuestInit::resolve_executable(&env, path.into())?;
let path = resolve_executable(&env, path.into())?;
let Some(file_name) = path.file_name() else {
return Err(anyhow!("cannot get file name of command path"));
};
@ -531,27 +537,6 @@ impl GuestInit {
map
}
fn resolve_executable(env: &HashMap<String, String>, path: PathBuf) -> Result<PathBuf> {
if path.is_absolute() {
return Ok(path);
}
if path.is_file() {
return Ok(path.absolutize()?.to_path_buf());
}
if let Some(path_var) = env.get("PATH") {
for item in path_var.split(':') {
let mut exe_path: PathBuf = item.into();
exe_path.push(&path);
if exe_path.is_file() {
return Ok(exe_path);
}
}
}
Ok(path)
}
fn env_list(env: HashMap<String, String>) -> Vec<String> {
env.iter()
.map(|(key, value)| format!("{}={}", key, value))
@ -560,7 +545,7 @@ impl GuestInit {
async fn fork_and_exec(
&mut self,
idm: IdmClient,
idm: IdmInternalClient,
cgroup: Cgroup,
working_dir: String,
path: CString,
@ -596,9 +581,35 @@ impl GuestInit {
Ok(())
}
async fn background(&mut self, idm: IdmClient, cgroup: Cgroup, executed: Pid) -> Result<()> {
async fn background(
&mut self,
idm: IdmInternalClient,
cgroup: Cgroup,
executed: Pid,
) -> Result<()> {
let mut background = GuestBackground::new(idm, cgroup, executed).await?;
background.run().await?;
Ok(())
}
}
pub fn resolve_executable(env: &HashMap<String, String>, path: PathBuf) -> Result<PathBuf> {
if path.is_absolute() {
return Ok(path);
}
if path.is_file() {
return Ok(path.absolutize()?.to_path_buf());
}
if let Some(path_var) = env.get("PATH") {
for item in path_var.split(':') {
let mut exe_path: PathBuf = item.into();
exe_path.push(&path);
if exe_path.is_file() {
return Ok(exe_path);
}
}
}
Ok(path)
}
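resolve_executable is now a free, public function rather than a GuestInit method. Its lookup rules: absolute paths pass through unchanged, an existing relative file is absolutized, and anything else is searched against the PATH entries (falling back to the original path if nothing matches). A small sketch from inside the guest crate; the PATH value here is illustrative:

```rust
use std::{collections::HashMap, path::PathBuf};

use crate::init::resolve_executable;

fn resolve_example() -> anyhow::Result<()> {
    let mut env = HashMap::new();
    env.insert("PATH".to_string(), "/bin:/usr/bin".to_string());

    // Absolute paths are returned unchanged.
    assert_eq!(
        resolve_executable(&env, PathBuf::from("/bin/sh"))?,
        PathBuf::from("/bin/sh")
    );

    // A bare name falls through to the PATH search, so this typically resolves
    // to /bin/sh on a standard layout (and comes back unchanged if nothing matches).
    let resolved = resolve_executable(&env, PathBuf::from("sh"))?;
    println!("resolved sh to {}", resolved.display());
    Ok(())
}
```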

View File

@ -6,6 +6,7 @@ use xenstore::{XsdClient, XsdInterface};
pub mod background;
pub mod childwait;
pub mod exec;
pub mod init;
pub mod metrics;

View File

@ -1,7 +1,7 @@
use std::{ops::Add, path::Path};
use anyhow::Result;
use krata::idm::protocol::{IdmMetricFormat, IdmMetricNode};
use krata::idm::internal::{MetricFormat, MetricNode};
use sysinfo::Process;
pub struct MetricsCollector {}
@ -11,9 +11,9 @@ impl MetricsCollector {
Ok(MetricsCollector {})
}
pub fn collect(&self) -> Result<IdmMetricNode> {
pub fn collect(&self) -> Result<MetricNode> {
let mut sysinfo = sysinfo::System::new();
Ok(IdmMetricNode::structural(
Ok(MetricNode::structural(
"guest",
vec![
self.collect_system(&mut sysinfo)?,
@ -22,22 +22,22 @@ impl MetricsCollector {
))
}
fn collect_system(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
fn collect_system(&self, sysinfo: &mut sysinfo::System) -> Result<MetricNode> {
sysinfo.refresh_memory();
Ok(IdmMetricNode::structural(
Ok(MetricNode::structural(
"system",
vec![IdmMetricNode::structural(
vec![MetricNode::structural(
"memory",
vec![
IdmMetricNode::value("total", sysinfo.total_memory(), IdmMetricFormat::Bytes),
IdmMetricNode::value("used", sysinfo.used_memory(), IdmMetricFormat::Bytes),
IdmMetricNode::value("free", sysinfo.free_memory(), IdmMetricFormat::Bytes),
MetricNode::value("total", sysinfo.total_memory(), MetricFormat::Bytes),
MetricNode::value("used", sysinfo.used_memory(), MetricFormat::Bytes),
MetricNode::value("free", sysinfo.free_memory(), MetricFormat::Bytes),
],
)],
))
}
fn collect_processes(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
fn collect_processes(&self, sysinfo: &mut sysinfo::System) -> Result<MetricNode> {
sysinfo.refresh_processes();
let mut processes = Vec::new();
let mut sysinfo_processes = sysinfo.processes().values().collect::<Vec<_>>();
@ -48,71 +48,68 @@ impl MetricsCollector {
}
processes.push(MetricsCollector::process_node(process)?);
}
Ok(IdmMetricNode::structural("process", processes))
Ok(MetricNode::structural("process", processes))
}
fn process_node(process: &Process) -> Result<IdmMetricNode> {
fn process_node(process: &Process) -> Result<MetricNode> {
let mut metrics = vec![];
if let Some(parent) = process.parent() {
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"parent",
parent.as_u32() as u64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
}
if let Some(exe) = process.exe().and_then(path_as_str) {
metrics.push(IdmMetricNode::raw_value("executable", exe));
metrics.push(MetricNode::raw_value("executable", exe));
}
if let Some(working_directory) = process.cwd().and_then(path_as_str) {
metrics.push(IdmMetricNode::raw_value("cwd", working_directory));
metrics.push(MetricNode::raw_value("cwd", working_directory));
}
let cmdline = process.cmd().to_vec();
metrics.push(IdmMetricNode::raw_value("cmdline", cmdline));
metrics.push(IdmMetricNode::structural(
metrics.push(MetricNode::raw_value("cmdline", cmdline));
metrics.push(MetricNode::structural(
"memory",
vec![
IdmMetricNode::value("resident", process.memory(), IdmMetricFormat::Bytes),
IdmMetricNode::value("virtual", process.virtual_memory(), IdmMetricFormat::Bytes),
MetricNode::value("resident", process.memory(), MetricFormat::Bytes),
MetricNode::value("virtual", process.virtual_memory(), MetricFormat::Bytes),
],
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"lifetime",
process.run_time(),
IdmMetricFormat::DurationSeconds,
MetricFormat::DurationSeconds,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"uid",
process.user_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"gid",
process.group_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"euid",
process
.effective_user_id()
.map(|x| (*x).add(0))
.unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"egid",
process.effective_group_id().map(|x| x.add(0)).unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
Ok(IdmMetricNode::structural(
process.pid().to_string(),
metrics,
))
Ok(MetricNode::structural(process.pid().to_string(), metrics))
}
}

View File

@ -6,12 +6,20 @@ fn main() -> Result<()> {
.descriptor_pool("crate::DESCRIPTOR_POOL")
.configure(
&mut config,
&["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
&[
"proto/krata/v1/control.proto",
"proto/krata/idm/transport.proto",
"proto/krata/idm/internal.proto",
],
&["proto/"],
)?;
tonic_build::configure().compile_with_config(
config,
&["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
&[
"proto/krata/v1/control.proto",
"proto/krata/idm/transport.proto",
"proto/krata/idm/internal.proto",
],
&["proto/"],
)?;
Ok(())

View File

@ -1,67 +0,0 @@
syntax = "proto3";
package krata.bus.idm;
option java_multiple_files = true;
option java_package = "dev.krata.proto.bus.idm";
option java_outer_classname = "IdmProto";
import "google/protobuf/struct.proto";
message IdmPacket {
oneof content {
IdmEvent event = 1;
IdmRequest request = 2;
IdmResponse response = 3;
}
}
message IdmEvent {
oneof event {
IdmExitEvent exit = 1;
}
}
message IdmExitEvent {
int32 code = 1;
}
message IdmRequest {
uint64 id = 1;
oneof request {
IdmPingRequest ping = 2;
IdmMetricsRequest metrics = 3;
}
}
message IdmPingRequest {}
message IdmMetricsRequest {}
message IdmResponse {
uint64 id = 1;
oneof response {
IdmPingResponse ping = 2;
IdmMetricsResponse metrics = 3;
}
}
message IdmPingResponse {}
message IdmMetricsResponse {
IdmMetricNode root = 1;
}
message IdmMetricNode {
string name = 1;
google.protobuf.Value value = 2;
IdmMetricFormat format = 3;
repeated IdmMetricNode children = 4;
}
enum IdmMetricFormat {
IDM_METRIC_FORMAT_UNKNOWN = 0;
IDM_METRIC_FORMAT_BYTES = 1;
IDM_METRIC_FORMAT_INTEGER = 2;
IDM_METRIC_FORMAT_DURATION_SECONDS = 3;
}

View File

@ -0,0 +1,89 @@
syntax = "proto3";
package krata.idm.internal;
option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.internal";
option java_outer_classname = "IdmInternalProto";
import "google/protobuf/struct.proto";
message ExitEvent {
int32 code = 1;
}
message PingRequest {}
message PingResponse {}
message MetricsRequest {}
message MetricsResponse {
MetricNode root = 1;
}
message MetricNode {
string name = 1;
google.protobuf.Value value = 2;
MetricFormat format = 3;
repeated MetricNode children = 4;
}
enum MetricFormat {
METRIC_FORMAT_UNKNOWN = 0;
METRIC_FORMAT_BYTES = 1;
METRIC_FORMAT_INTEGER = 2;
METRIC_FORMAT_DURATION_SECONDS = 3;
}
message ExecEnvVar {
string key = 1;
string value = 2;
}
message ExecStreamRequestStart {
repeated ExecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
}
message ExecStreamRequestStdin {
bytes data = 1;
}
message ExecStreamRequestUpdate {
oneof update {
ExecStreamRequestStart start = 1;
ExecStreamRequestStdin stdin = 2;
}
}
message ExecStreamResponseUpdate {
bool exited = 1;
string error = 2;
int32 exit_code = 3;
bytes stdout = 4;
bytes stderr = 5;
}
message Event {
oneof event {
ExitEvent exit = 1;
}
}
message Request {
oneof request {
PingRequest ping = 1;
MetricsRequest metrics = 2;
ExecStreamRequestUpdate exec_stream = 3;
}
}
message Response {
oneof response {
PingResponse ping = 1;
MetricsResponse metrics = 2;
ExecStreamResponseUpdate exec_stream = 3;
}
}
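Every internal IDM exchange is wrapped in the Request/Response envelopes above. As a sketch, this is how the prost-generated Rust types (module paths as imported in crates/guest/src/exec.rs) would express an exec-stream start request; the command, environment, and working directory values are illustrative:

```rust
use krata::idm::internal::{
    exec_stream_request_update::Update, request::Request as RequestType, ExecEnvVar,
    ExecStreamRequestStart, ExecStreamRequestUpdate, Request,
};

// Build the first update of an exec stream: a Start carrying the command line.
fn exec_start_request() -> Request {
    Request {
        request: Some(RequestType::ExecStream(ExecStreamRequestUpdate {
            update: Some(Update::Start(ExecStreamRequestStart {
                environment: vec![ExecEnvVar {
                    key: "PATH".to_string(),
                    value: "/bin:/usr/bin:/usr/local/bin".to_string(),
                }],
                command: vec!["echo".to_string(), "hello".to_string()],
                working_directory: "/".to_string(),
            })),
        })),
    }
}
```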

View File

@ -0,0 +1,27 @@
syntax = "proto3";
package krata.idm.transport;
option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.transport";
option java_outer_classname = "IdmTransportProto";
message IdmTransportPacket {
uint64 id = 1;
uint64 channel = 2;
IdmTransportPacketForm form = 3;
bytes data = 4;
}
enum IdmTransportPacketForm {
IDM_TRANSPORT_PACKET_FORM_UNKNOWN = 0;
IDM_TRANSPORT_PACKET_FORM_RAW = 1;
IDM_TRANSPORT_PACKET_FORM_EVENT = 2;
IDM_TRANSPORT_PACKET_FORM_REQUEST = 3;
IDM_TRANSPORT_PACKET_FORM_RESPONSE = 4;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST = 5;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_UPDATE = 6;
IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_UPDATE = 7;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_CLOSED = 8;
IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_CLOSED = 9;
}
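On the wire, each encoded internal message rides inside an IdmTransportPacket tagged with a channel and a form. A sketch of framing a ping request for the internal channel, using the byte layout IdmFileBackend::recv expects (two 0xff marker bytes, a little-endian u32 length, then the protobuf-encoded packet); the packet id is illustrative and the prost Message trait is assumed to be available:

```rust
use krata::idm::{
    internal::{request::Request as RequestType, PingRequest, Request, INTERNAL_IDM_CHANNEL},
    transport::{IdmTransportPacket, IdmTransportPacketForm},
};
use prost::Message;

fn frame_ping() -> Vec<u8> {
    // The internal request is protobuf-encoded and carried as the packet payload.
    let request = Request {
        request: Some(RequestType::Ping(PingRequest {})),
    };
    let packet = IdmTransportPacket {
        id: 1,
        channel: INTERNAL_IDM_CHANNEL,
        form: IdmTransportPacketForm::Request.into(),
        data: request.encode_to_vec(),
    };

    // Frame: 0xff 0xff marker, little-endian u32 length, then the packet bytes.
    let body = packet.encode_to_vec();
    let mut framed = vec![0xff, 0xff];
    framed.extend_from_slice(&(body.len() as u32).to_le_bytes());
    framed.extend_from_slice(&body);
    framed
}
```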

View File

@ -17,10 +17,14 @@ message Guest {
message GuestSpec {
string name = 1;
GuestImageSpec image = 2;
uint32 vcpus = 3;
uint64 mem = 4;
GuestTaskSpec task = 5;
repeated GuestSpecAnnotation annotations = 6;
// If not specified, defaults to the daemon default kernel.
GuestImageSpec kernel = 3;
// If not specified, defaults to the daemon default initrd.
GuestImageSpec initrd = 4;
uint32 vcpus = 5;
uint64 mem = 6;
GuestTaskSpec task = 7;
repeated GuestSpecAnnotation annotations = 8;
}
message GuestImageSpec {
@ -29,20 +33,23 @@ message GuestImageSpec {
}
}
enum GuestOciImageFormat {
GUEST_OCI_IMAGE_FORMAT_UNKNOWN = 0;
GUEST_OCI_IMAGE_FORMAT_SQUASHFS = 1;
GUEST_OCI_IMAGE_FORMAT_EROFS = 2;
enum OciImageFormat {
OCI_IMAGE_FORMAT_UNKNOWN = 0;
OCI_IMAGE_FORMAT_SQUASHFS = 1;
OCI_IMAGE_FORMAT_EROFS = 2;
// Tar format is not launchable, and is intended for kernel images.
OCI_IMAGE_FORMAT_TAR = 3;
}
message GuestOciImageSpec {
string digest = 1;
GuestOciImageFormat format = 2;
OciImageFormat format = 2;
}
message GuestTaskSpec {
repeated GuestTaskSpecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
}
message GuestTaskSpecEnvVar {
@ -60,7 +67,8 @@ message GuestState {
GuestNetworkState network = 2;
GuestExitInfo exit_info = 3;
GuestErrorInfo error_info = 4;
uint32 domid = 5;
string host = 5;
uint32 domid = 6;
}
enum GuestStatus {

View File

@ -6,15 +6,19 @@ option java_multiple_files = true;
option java_package = "dev.krata.proto.v1.control";
option java_outer_classname = "ControlProto";
import "krata/bus/idm.proto";
import "krata/idm/transport.proto";
import "krata/v1/common.proto";
service ControlService {
rpc IdentifyHost(IdentifyHostRequest) returns (IdentifyHostReply);
rpc CreateGuest(CreateGuestRequest) returns (CreateGuestReply);
rpc DestroyGuest(DestroyGuestRequest) returns (DestroyGuestReply);
rpc ResolveGuest(ResolveGuestRequest) returns (ResolveGuestReply);
rpc ListGuests(ListGuestsRequest) returns (ListGuestsReply);
rpc ExecGuest(stream ExecGuestRequest) returns (stream ExecGuestReply);
rpc ConsoleData(stream ConsoleDataRequest) returns (stream ConsoleDataReply);
rpc ReadGuestMetrics(ReadGuestMetricsRequest) returns (ReadGuestMetricsReply);
@ -24,6 +28,14 @@ service ControlService {
rpc PullImage(PullImageRequest) returns (stream PullImageReply);
}
message IdentifyHostRequest {}
message IdentifyHostReply {
string host_uuid = 1;
uint32 host_domid = 2;
string krata_version = 3;
}
message CreateGuestRequest {
krata.v1.common.GuestSpec spec = 1;
}
@ -52,6 +64,20 @@ message ListGuestsReply {
repeated krata.v1.common.Guest guests = 1;
}
message ExecGuestRequest {
string guest_id = 1;
krata.v1.common.GuestTaskSpec task = 2;
bytes data = 3;
}
message ExecGuestReply {
bool exited = 1;
string error = 2;
int32 exit_code = 3;
bytes stdout = 4;
bytes stderr = 5;
}
message ConsoleDataRequest {
string guest_id = 1;
bytes data = 2;
@ -84,51 +110,80 @@ message ReadGuestMetricsReply {
message SnoopIdmRequest {}
message SnoopIdmReply {
uint32 from = 1;
uint32 to = 2;
krata.bus.idm.IdmPacket packet = 3;
string from = 1;
string to = 2;
krata.idm.transport.IdmTransportPacket packet = 3;
}
enum PullImageProgressLayerPhase {
PULL_IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
PULL_IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
message ImageProgress {
ImageProgressPhase phase = 1;
repeated ImageProgressLayer layers = 2;
ImageProgressIndication indication = 3;
}
message PullImageProgressLayer {
enum ImageProgressPhase {
IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
IMAGE_PROGRESS_PHASE_STARTED = 1;
IMAGE_PROGRESS_PHASE_RESOLVING = 2;
IMAGE_PROGRESS_PHASE_RESOLVED = 3;
IMAGE_PROGRESS_PHASE_CONFIG_DOWNLOAD = 4;
IMAGE_PROGRESS_PHASE_LAYER_DOWNLOAD = 5;
IMAGE_PROGRESS_PHASE_ASSEMBLE = 6;
IMAGE_PROGRESS_PHASE_PACK = 7;
IMAGE_PROGRESS_PHASE_COMPLETE = 8;
}
message ImageProgressLayer {
string id = 1;
PullImageProgressLayerPhase phase = 2;
uint64 value = 3;
uint64 total = 4;
ImageProgressLayerPhase phase = 2;
ImageProgressIndication indication = 3;
}
enum PullImageProgressPhase {
PULL_IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
PULL_IMAGE_PROGRESS_PHASE_RESOLVING = 1;
PULL_IMAGE_PROGRESS_PHASE_RESOLVED = 2;
PULL_IMAGE_PROGRESS_PHASE_CONFIG_ACQUIRE = 3;
PULL_IMAGE_PROGRESS_PHASE_LAYER_ACQUIRE = 4;
PULL_IMAGE_PROGRESS_PHASE_PACKING = 5;
PULL_IMAGE_PROGRESS_PHASE_COMPLETE = 6;
enum ImageProgressLayerPhase {
IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
}
message PullImageProgress {
PullImageProgressPhase phase = 1;
repeated PullImageProgressLayer layers = 2;
uint64 value = 3;
uint64 total = 4;
message ImageProgressIndication {
oneof indication {
ImageProgressIndicationBar bar = 1;
ImageProgressIndicationSpinner spinner = 2;
ImageProgressIndicationHidden hidden = 3;
ImageProgressIndicationCompleted completed = 4;
}
}
message ImageProgressIndicationBar {
string message = 1;
uint64 current = 2;
uint64 total = 3;
bool is_bytes = 4;
}
message ImageProgressIndicationSpinner {
string message = 1;
}
message ImageProgressIndicationHidden {}
message ImageProgressIndicationCompleted {
string message = 1;
uint64 total = 2;
bool is_bytes = 3;
}
message PullImageRequest {
string image = 1;
krata.v1.common.GuestOciImageFormat format = 2;
krata.v1.common.OciImageFormat format = 2;
bool overwrite_cache = 3;
}
message PullImageReply {
PullImageProgress progress = 1;
ImageProgress progress = 1;
string digest = 2;
krata.v1.common.GuestOciImageFormat format = 3;
krata.v1.common.OciImageFormat format = 3;
}

View File

@ -1 +0,0 @@
pub mod idm;

View File

@ -8,10 +8,6 @@ use std::{
time::Duration,
};
use super::protocol::{
idm_packet::Content, idm_request::Request, idm_response::Response, IdmEvent, IdmPacket,
IdmRequest, IdmResponse,
};
use anyhow::{anyhow, Result};
use log::{debug, error};
use nix::sys::termios::{cfmakeraw, tcgetattr, tcsetattr, SetArg};
@ -22,14 +18,23 @@ use tokio::{
select,
sync::{
broadcast,
mpsc::{channel, Receiver, Sender},
mpsc::{self, Receiver, Sender},
oneshot, Mutex,
},
task::JoinHandle,
time::timeout,
};
type RequestMap = Arc<Mutex<HashMap<u64, oneshot::Sender<IdmResponse>>>>;
use super::{
internal,
serialize::{IdmRequest, IdmSerializable},
transport::{IdmTransportPacket, IdmTransportPacketForm},
};
type OneshotRequestMap<R> = Arc<Mutex<HashMap<u64, oneshot::Sender<<R as IdmRequest>::Response>>>>;
type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, mpsc::Sender<R>>>>;
pub type IdmInternalClient = IdmClient<internal::Request, internal::Event>;
const IDM_PACKET_QUEUE_LEN: usize = 100;
const IDM_REQUEST_TIMEOUT_SECS: u64 = 10;
@ -37,8 +42,8 @@ const IDM_PACKET_MAX_SIZE: usize = 20 * 1024 * 1024;
#[async_trait::async_trait]
pub trait IdmBackend: Send {
async fn recv(&mut self) -> Result<IdmPacket>;
async fn send(&mut self, packet: IdmPacket) -> Result<()>;
async fn recv(&mut self) -> Result<IdmTransportPacket>;
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()>;
}
pub struct IdmFileBackend {
@ -66,30 +71,30 @@ impl IdmFileBackend {
#[async_trait::async_trait]
impl IdmBackend for IdmFileBackend {
async fn recv(&mut self) -> Result<IdmPacket> {
async fn recv(&mut self) -> Result<IdmTransportPacket> {
let mut fd = self.read_fd.lock().await;
let mut guard = fd.readable_mut().await?;
let b1 = guard.get_inner_mut().read_u8().await?;
if b1 != 0xff {
return Ok(IdmPacket::default());
return Ok(IdmTransportPacket::default());
}
let b2 = guard.get_inner_mut().read_u8().await?;
if b2 != 0xff {
return Ok(IdmPacket::default());
return Ok(IdmTransportPacket::default());
}
let size = guard.get_inner_mut().read_u32_le().await?;
if size == 0 {
return Ok(IdmPacket::default());
return Ok(IdmTransportPacket::default());
}
let mut buffer = vec![0u8; size as usize];
guard.get_inner_mut().read_exact(&mut buffer).await?;
match IdmPacket::decode(buffer.as_slice()) {
match IdmTransportPacket::decode(buffer.as_slice()) {
Ok(packet) => Ok(packet),
Err(error) => Err(anyhow!("received invalid idm packet: {}", error)),
}
}
async fn send(&mut self, packet: IdmPacket) -> Result<()> {
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
let mut file = self.write.lock().await;
let data = packet.encode_to_vec();
file.write_all(&[0xff, 0xff]).await?;
@ -100,16 +105,19 @@ impl IdmBackend for IdmFileBackend {
}
#[derive(Clone)]
pub struct IdmClient {
request_backend_sender: broadcast::Sender<IdmRequest>,
pub struct IdmClient<R: IdmRequest, E: IdmSerializable> {
channel: u64,
request_backend_sender: broadcast::Sender<(u64, R)>,
request_stream_backend_sender: broadcast::Sender<IdmClientStreamResponseHandle<R>>,
next_request_id: Arc<Mutex<u64>>,
event_receiver_sender: broadcast::Sender<IdmEvent>,
tx_sender: Sender<IdmPacket>,
requests: RequestMap,
event_receiver_sender: broadcast::Sender<E>,
tx_sender: Sender<IdmTransportPacket>,
requests: OneshotRequestMap<R>,
request_streams: StreamRequestMap<R>,
task: Arc<JoinHandle<()>>,
}
impl Drop for IdmClient {
impl<R: IdmRequest, E: IdmSerializable> Drop for IdmClient<R, E> {
fn drop(&mut self) {
if Arc::strong_count(&self.task) <= 1 {
self.task.abort();
@ -117,21 +125,122 @@ impl Drop for IdmClient {
}
}
impl IdmClient {
pub async fn new(backend: Box<dyn IdmBackend>) -> Result<IdmClient> {
pub struct IdmClientStreamRequestHandle<R: IdmRequest, E: IdmSerializable> {
pub id: u64,
pub receiver: Receiver<R::Response>,
pub client: IdmClient<R, E>,
}
impl<R: IdmRequest, E: IdmSerializable> IdmClientStreamRequestHandle<R, E> {
pub async fn update(&self, request: R) -> Result<()> {
self.client
.tx_sender
.send(IdmTransportPacket {
id: self.id,
channel: self.client.channel,
form: IdmTransportPacketForm::StreamRequestUpdate.into(),
data: request.encode()?,
})
.await?;
Ok(())
}
}
impl<R: IdmRequest, E: IdmSerializable> Drop for IdmClientStreamRequestHandle<R, E> {
fn drop(&mut self) {
let id = self.id;
let client = self.client.clone();
tokio::task::spawn(async move {
let _ = client
.tx_sender
.send(IdmTransportPacket {
id,
channel: client.channel,
form: IdmTransportPacketForm::StreamRequestClosed.into(),
data: vec![],
})
.await;
});
}
}
#[derive(Clone)]
pub struct IdmClientStreamResponseHandle<R: IdmRequest> {
pub initial: R,
pub id: u64,
channel: u64,
tx_sender: Sender<IdmTransportPacket>,
receiver: Arc<Mutex<Option<Receiver<R>>>>,
}
impl<R: IdmRequest> IdmClientStreamResponseHandle<R> {
pub async fn respond(&self, response: R::Response) -> Result<()> {
self.tx_sender
.send(IdmTransportPacket {
id: self.id,
channel: self.channel,
form: IdmTransportPacketForm::StreamResponseUpdate.into(),
data: response.encode()?,
})
.await?;
Ok(())
}
pub async fn take(&self) -> Result<Receiver<R>> {
let mut guard = self.receiver.lock().await;
let Some(receiver) = (*guard).take() else {
return Err(anyhow!("request has already been claimed!"));
};
Ok(receiver)
}
}
impl<R: IdmRequest> Drop for IdmClientStreamResponseHandle<R> {
fn drop(&mut self) {
if Arc::strong_count(&self.receiver) <= 1 {
let id = self.id;
let channel = self.channel;
let tx_sender = self.tx_sender.clone();
tokio::task::spawn(async move {
let _ = tx_sender
.send(IdmTransportPacket {
id,
channel,
form: IdmTransportPacketForm::StreamResponseClosed.into(),
data: vec![],
})
.await;
});
}
}
}
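
A minimal sketch of the serving side of the stream handles above, assuming an already-constructed IdmInternalClient; the handler body is hypothetical:

async fn serve_streams(client: IdmInternalClient) -> anyhow::Result<()> {
    let mut streams = client.request_streams().await?;
    while let Ok(handle) = streams.recv().await {
        // handle.initial carries the request that opened the stream.
        let mut updates = handle.take().await?;
        while let Some(_update) = updates.recv().await {
            // hypothetical: compute a response for each client update and
            // emit it with handle.respond(response).await?
        }
        // dropping the last clone of `handle` sends StreamResponseClosed
    }
    Ok(())
}
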
impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
pub async fn new(channel: u64, backend: Box<dyn IdmBackend>) -> Result<Self> {
let requests = Arc::new(Mutex::new(HashMap::new()));
let request_streams = Arc::new(Mutex::new(HashMap::new()));
let request_update_streams = Arc::new(Mutex::new(HashMap::new()));
let (event_sender, event_receiver) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
let (internal_request_backend_sender, _) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
let (tx_sender, tx_receiver) = channel(IDM_PACKET_QUEUE_LEN);
let (internal_request_stream_backend_sender, _) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
let (tx_sender, tx_receiver) = mpsc::channel(IDM_PACKET_QUEUE_LEN);
let backend_event_sender = event_sender.clone();
let request_backend_sender = internal_request_backend_sender.clone();
let request_stream_backend_sender = internal_request_stream_backend_sender.clone();
let requests_for_client = requests.clone();
let request_streams_for_client = request_streams.clone();
let tx_sender_for_client = tx_sender.clone();
let task = tokio::task::spawn(async move {
if let Err(error) = IdmClient::process(
backend,
channel,
tx_sender,
backend_event_sender,
requests,
request_streams,
request_update_streams,
internal_request_backend_sender,
internal_request_stream_backend_sender,
event_receiver,
tx_receiver,
)
@ -141,16 +250,19 @@ impl IdmClient {
}
});
Ok(IdmClient {
channel,
next_request_id: Arc::new(Mutex::new(0)),
event_receiver_sender: event_sender.clone(),
request_backend_sender,
request_stream_backend_sender,
requests: requests_for_client,
tx_sender,
request_streams: request_streams_for_client,
tx_sender: tx_sender_for_client,
task: Arc::new(task),
})
}
pub async fn open<P: AsRef<Path>>(path: P) -> Result<IdmClient> {
pub async fn open<P: AsRef<Path>>(channel: u64, path: P) -> Result<Self> {
let read_file = File::options()
.read(true)
.write(false)
@ -164,39 +276,54 @@ impl IdmClient {
.open(path)
.await?;
let backend = IdmFileBackend::new(read_file, write_file).await?;
IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await
IdmClient::new(channel, Box::new(backend) as Box<dyn IdmBackend>).await
}
pub async fn emit(&self, event: IdmEvent) -> Result<()> {
pub async fn emit<T: IdmSerializable>(&self, event: T) -> Result<()> {
let id = {
let mut guard = self.next_request_id.lock().await;
let req = *guard;
*guard = req.wrapping_add(1);
req
};
self.tx_sender
.send(IdmPacket {
content: Some(Content::Event(event)),
.send(IdmTransportPacket {
id,
form: IdmTransportPacketForm::Event.into(),
channel: self.channel,
data: event.encode()?,
})
.await?;
Ok(())
}
pub async fn requests(&self) -> Result<broadcast::Receiver<IdmRequest>> {
pub async fn requests(&self) -> Result<broadcast::Receiver<(u64, R)>> {
Ok(self.request_backend_sender.subscribe())
}
pub async fn respond(&self, id: u64, response: Response) -> Result<()> {
let packet = IdmPacket {
content: Some(Content::Response(IdmResponse {
id,
response: Some(response),
})),
pub async fn request_streams(
&self,
) -> Result<broadcast::Receiver<IdmClientStreamResponseHandle<R>>> {
Ok(self.request_stream_backend_sender.subscribe())
}
pub async fn respond<T: IdmSerializable>(&self, id: u64, response: T) -> Result<()> {
let packet = IdmTransportPacket {
id,
form: IdmTransportPacketForm::Response.into(),
channel: self.channel,
data: response.encode()?,
};
self.tx_sender.send(packet).await?;
Ok(())
}
pub async fn subscribe(&self) -> Result<broadcast::Receiver<IdmEvent>> {
pub async fn subscribe(&self) -> Result<broadcast::Receiver<E>> {
Ok(self.event_receiver_sender.subscribe())
}
pub async fn send(&self, request: Request) -> Result<Response> {
let (sender, receiver) = oneshot::channel::<IdmResponse>();
pub async fn send(&self, request: R) -> Result<R::Response> {
let (sender, receiver) = oneshot::channel::<R::Response>();
let req = {
let mut guard = self.next_request_id.lock().await;
let req = *guard;
@ -217,52 +344,135 @@ impl IdmClient {
});
});
self.tx_sender
.send(IdmPacket {
content: Some(Content::Request(IdmRequest {
id: req,
request: Some(request),
})),
.send(IdmTransportPacket {
id: req,
channel: self.channel,
form: IdmTransportPacketForm::Request.into(),
data: request.encode()?,
})
.await?;
let response = timeout(Duration::from_secs(IDM_REQUEST_TIMEOUT_SECS), receiver).await??;
success.store(true, Ordering::Release);
if let Some(response) = response.response {
Ok(response)
} else {
Err(anyhow!("response did not contain any content"))
}
Ok(response)
}
pub async fn send_stream(&self, request: R) -> Result<IdmClientStreamRequestHandle<R, E>> {
let (sender, receiver) = mpsc::channel::<R::Response>(100);
let req = {
let mut guard = self.next_request_id.lock().await;
let req = *guard;
*guard = req.wrapping_add(1);
req
};
let mut requests = self.request_streams.lock().await;
requests.insert(req, sender);
drop(requests);
self.tx_sender
.send(IdmTransportPacket {
id: req,
channel: self.channel,
form: IdmTransportPacketForm::StreamRequest.into(),
data: request.encode()?,
})
.await?;
Ok(IdmClientStreamRequestHandle {
id: req,
receiver,
client: self.clone(),
})
}
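
For reference, a usage sketch of the two request paths above; the channel constant and console device path are assumptions, and names rely on this module's imports:

async fn example(request: internal::Request) -> anyhow::Result<()> {
    let client = IdmInternalClient::open(internal::INTERNAL_IDM_CHANNEL, "/dev/hvc1").await?;

    // One-shot request: a decoded response, or an error after IDM_REQUEST_TIMEOUT_SECS.
    let _response = client.send(request.clone()).await?;

    // Streaming request: the handle yields response updates until either side
    // closes; dropping the handle notifies the peer with StreamRequestClosed.
    let mut handle = client.send_stream(request).await?;
    while let Some(_update) = handle.receiver.recv().await {
        // consume streamed response updates here
    }
    Ok(())
}
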
#[allow(clippy::too_many_arguments)]
async fn process(
mut backend: Box<dyn IdmBackend>,
event_sender: broadcast::Sender<IdmEvent>,
requests: RequestMap,
request_backend_sender: broadcast::Sender<IdmRequest>,
_event_receiver: broadcast::Receiver<IdmEvent>,
mut receiver: Receiver<IdmPacket>,
channel: u64,
tx_sender: Sender<IdmTransportPacket>,
event_sender: broadcast::Sender<E>,
requests: OneshotRequestMap<R>,
request_streams: StreamRequestMap<R>,
request_update_streams: StreamRequestUpdateMap<R>,
request_backend_sender: broadcast::Sender<(u64, R)>,
request_stream_backend_sender: broadcast::Sender<IdmClientStreamResponseHandle<R>>,
_event_receiver: broadcast::Receiver<E>,
mut receiver: Receiver<IdmTransportPacket>,
) -> Result<()> {
loop {
select! {
x = backend.recv() => match x {
Ok(packet) => {
match packet.content {
Some(Content::Event(event)) => {
let _ = event_sender.send(event);
},
if packet.channel != channel {
continue;
}
Some(Content::Request(request)) => {
let _ = request_backend_sender.send(request);
},
Some(Content::Response(response)) => {
let mut requests = requests.lock().await;
if let Some(sender) = requests.remove(&response.id) {
drop(requests);
let _ = sender.send(response);
match packet.form() {
IdmTransportPacketForm::Event => {
if let Ok(event) = E::decode(&packet.data) {
let _ = event_sender.send(event);
}
},
IdmTransportPacketForm::Request => {
if let Ok(request) = R::decode(&packet.data) {
let _ = request_backend_sender.send((packet.id, request));
}
},
IdmTransportPacketForm::Response => {
let mut requests = requests.lock().await;
if let Some(sender) = requests.remove(&packet.id) {
drop(requests);
if let Ok(response) = R::Response::decode(&packet.data) {
let _ = sender.send(response);
}
}
},
IdmTransportPacketForm::StreamRequest => {
if let Ok(request) = R::decode(&packet.data) {
let mut update_streams = request_update_streams.lock().await;
let (sender, receiver) = mpsc::channel(100);
update_streams.insert(packet.id, sender.clone());
let handle = IdmClientStreamResponseHandle {
initial: request,
id: packet.id,
channel,
tx_sender: tx_sender.clone(),
receiver: Arc::new(Mutex::new(Some(receiver))),
};
let _ = request_stream_backend_sender.send(handle);
}
}
IdmTransportPacketForm::StreamRequestUpdate => {
if let Ok(request) = R::decode(&packet.data) {
let mut update_streams = request_update_streams.lock().await;
if let Some(stream) = update_streams.get_mut(&packet.id) {
let _ = stream.try_send(request);
}
}
}
IdmTransportPacketForm::StreamRequestClosed => {
let mut update_streams = request_update_streams.lock().await;
update_streams.remove(&packet.id);
}
IdmTransportPacketForm::StreamResponseUpdate => {
let requests = request_streams.lock().await;
if let Some(sender) = requests.get(&packet.id) {
if let Ok(response) = R::Response::decode(&packet.data) {
let _ = sender.try_send(response);
}
}
}
IdmTransportPacketForm::StreamResponseClosed => {
let mut requests = request_streams.lock().await;
requests.remove(&packet.id);
}
_ => {},
}
},



@ -1,26 +1,66 @@
use anyhow::Result;
use prost::Message;
use prost_types::{ListValue, Value};
include!(concat!(env!("OUT_DIR"), "/krata.bus.idm.rs"));
use super::serialize::{IdmRequest, IdmSerializable};
include!(concat!(env!("OUT_DIR"), "/krata.idm.internal.rs"));
pub const INTERNAL_IDM_CHANNEL: u64 = 0;
impl IdmSerializable for Event {
fn encode(&self) -> Result<Vec<u8>> {
Ok(self.encode_to_vec())
}
fn decode(bytes: &[u8]) -> Result<Self> {
Ok(<Self as prost::Message>::decode(bytes)?)
}
}
impl IdmSerializable for Request {
fn encode(&self) -> Result<Vec<u8>> {
Ok(self.encode_to_vec())
}
fn decode(bytes: &[u8]) -> Result<Self> {
Ok(<Self as prost::Message>::decode(bytes)?)
}
}
impl IdmRequest for Request {
type Response = Response;
}
impl IdmSerializable for Response {
fn encode(&self) -> Result<Vec<u8>> {
Ok(self.encode_to_vec())
}
fn decode(bytes: &[u8]) -> Result<Self> {
Ok(<Self as prost::Message>::decode(bytes)?)
}
}
pub trait AsIdmMetricValue {
fn as_metric_value(&self) -> Value;
}
impl IdmMetricNode {
pub fn structural<N: AsRef<str>>(name: N, children: Vec<IdmMetricNode>) -> IdmMetricNode {
IdmMetricNode {
impl MetricNode {
pub fn structural<N: AsRef<str>>(name: N, children: Vec<MetricNode>) -> MetricNode {
MetricNode {
name: name.as_ref().to_string(),
value: None,
format: IdmMetricFormat::Unknown.into(),
format: MetricFormat::Unknown.into(),
children,
}
}
pub fn raw_value<N: AsRef<str>, V: AsIdmMetricValue>(name: N, value: V) -> IdmMetricNode {
IdmMetricNode {
pub fn raw_value<N: AsRef<str>, V: AsIdmMetricValue>(name: N, value: V) -> MetricNode {
MetricNode {
name: name.as_ref().to_string(),
value: Some(value.as_metric_value()),
format: IdmMetricFormat::Unknown.into(),
format: MetricFormat::Unknown.into(),
children: vec![],
}
}
@ -28,9 +68,9 @@ impl IdmMetricNode {
pub fn value<N: AsRef<str>, V: AsIdmMetricValue>(
name: N,
value: V,
format: IdmMetricFormat,
) -> IdmMetricNode {
IdmMetricNode {
format: MetricFormat,
) -> MetricNode {
MetricNode {
name: name.as_ref().to_string(),
value: Some(value.as_metric_value()),
format: format.into(),


@ -1,3 +1,5 @@
#[cfg(unix)]
pub mod client;
pub use crate::bus::idm as protocol;
pub mod internal;
pub mod serialize;
pub mod transport;


@ -0,0 +1,10 @@
use anyhow::Result;
pub trait IdmSerializable: Sized + Clone + Send + Sync + 'static {
fn decode(bytes: &[u8]) -> Result<Self>;
fn encode(&self) -> Result<Vec<u8>>;
}
pub trait IdmRequest: IdmSerializable {
type Response: IdmSerializable;
}
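
A sketch of plugging a custom type into these traits; EchoRequest and EchoResponse are hypothetical and use a plain UTF-8 encoding rather than protobuf:

use anyhow::Result;

#[derive(Clone, Debug)]
pub struct EchoRequest(pub String);

#[derive(Clone, Debug)]
pub struct EchoResponse(pub String);

impl IdmSerializable for EchoRequest {
    fn encode(&self) -> Result<Vec<u8>> {
        Ok(self.0.clone().into_bytes())
    }

    fn decode(bytes: &[u8]) -> Result<Self> {
        Ok(EchoRequest(String::from_utf8(bytes.to_vec())?))
    }
}

impl IdmSerializable for EchoResponse {
    fn encode(&self) -> Result<Vec<u8>> {
        Ok(self.0.clone().into_bytes())
    }

    fn decode(bytes: &[u8]) -> Result<Self> {
        Ok(EchoResponse(String::from_utf8(bytes.to_vec())?))
    }
}

impl IdmRequest for EchoRequest {
    type Response = EchoResponse;
}
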


@ -0,0 +1 @@
include!(concat!(env!("OUT_DIR"), "/krata.idm.transport.rs"));


@ -1,7 +1,6 @@
use once_cell::sync::Lazy;
use prost_reflect::DescriptorPool;
pub mod bus;
pub mod v1;
pub mod client;


@ -16,7 +16,7 @@ clap = { workspace = true }
env_logger = { workspace = true }
etherparse = { workspace = true }
futures = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.10" }
krata-advmac = { workspace = true }
libc = { workspace = true }
log = { workspace = true }


@ -5,10 +5,10 @@ use env_logger::Env;
use krataoci::{
name::ImageName,
packer::{service::OciPackerService, OciPackedFormat},
progress::{OciProgress, OciProgressContext},
progress::OciProgressContext,
registry::OciPlatform,
};
use tokio::{fs, sync::mpsc::channel};
use tokio::fs;
#[tokio::main]
async fn main() -> Result<()> {
@ -22,27 +22,22 @@ async fn main() -> Result<()> {
fs::create_dir(&cache_dir).await?;
}
let (sender, mut receiver) = channel::<OciProgress>(100);
let (context, mut receiver) = OciProgressContext::create();
tokio::task::spawn(async move {
loop {
let mut progresses = Vec::new();
let _ = receiver.recv_many(&mut progresses, 100).await;
let Some(progress) = progresses.last() else {
continue;
};
if (receiver.changed().await).is_err() {
break;
}
let progress = receiver.borrow_and_update();
println!("phase {:?}", progress.phase);
for (id, layer) in &progress.layers {
println!(
"{} {:?} {} of {}",
id, layer.phase, layer.value, layer.total
)
println!("{} {:?} {:?}", id, layer.phase, layer.indication,)
}
}
});
let context = OciProgressContext::new(sender);
let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current())?;
let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current()).await?;
let packed = service
.request(image.clone(), OciPackedFormat::Squashfs, context)
.request(image.clone(), OciPackedFormat::Squashfs, false, context)
.await?;
println!(
"generated squashfs of {} to {}",


@ -1,22 +1,25 @@
use crate::fetch::{OciImageFetcher, OciImageLayer, OciResolvedImage};
use crate::fetch::{OciImageFetcher, OciImageLayer, OciImageLayerReader, OciResolvedImage};
use crate::progress::OciBoundProgress;
use crate::schema::OciSchema;
use crate::vfs::{VfsNode, VfsTree};
use anyhow::{anyhow, Result};
use log::{debug, trace, warn};
use oci_spec::image::{ImageConfiguration, ImageManifest};
use oci_spec::image::{Descriptor, ImageConfiguration, ImageManifest};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::fs;
use tokio::io::AsyncRead;
use tokio_stream::StreamExt;
use tokio_tar::{Archive, Entry};
use uuid::Uuid;
pub struct OciImageAssembled {
pub digest: String,
pub manifest: ImageManifest,
pub config: ImageConfiguration,
pub descriptor: Descriptor,
pub manifest: OciSchema<ImageManifest>,
pub config: OciSchema<ImageConfiguration>,
pub vfs: Arc<VfsTree>,
pub tmp_dir: Option<PathBuf>,
}
@ -33,11 +36,12 @@ impl Drop for OciImageAssembled {
pub struct OciImageAssembler {
downloader: OciImageFetcher,
resolved: OciResolvedImage,
resolved: Option<OciResolvedImage>,
progress: OciBoundProgress,
work_dir: PathBuf,
disk_dir: PathBuf,
tmp_dir: Option<PathBuf>,
success: AtomicBool,
}
impl OciImageAssembler {
@ -81,11 +85,12 @@ impl OciImageAssembler {
Ok(OciImageAssembler {
downloader,
resolved,
resolved: Some(resolved),
progress,
work_dir,
disk_dir: target_dir,
tmp_dir,
success: AtomicBool::new(false),
})
}
@ -97,11 +102,11 @@ impl OciImageAssembler {
self.assemble_with(&layer_dir).await
}
async fn assemble_with(self, layer_dir: &Path) -> Result<OciImageAssembled> {
let local = self
.downloader
.download(self.resolved.clone(), layer_dir)
.await?;
async fn assemble_with(mut self, layer_dir: &Path) -> Result<OciImageAssembled> {
let Some(ref resolved) = self.resolved else {
return Err(anyhow!("resolved image was not available when expected"));
};
let local = self.downloader.download(resolved, layer_dir).await?;
let mut vfs = VfsTree::new();
for layer in &local.layers {
debug!(
@ -110,12 +115,14 @@ impl OciImageAssembler {
);
self.progress
.update(|progress| {
progress.extracting_layer(&layer.digest, 0, 1);
progress.start_extracting_layer(&layer.digest);
})
.await;
debug!("process layer digest={}", &layer.digest,);
let mut archive = layer.archive().await?;
let mut entries = archive.entries()?;
let mut count = 0u64;
let mut size = 0u64;
while let Some(entry) = entries.next().await {
let mut entry = entry?;
let path = entry.path()?;
@ -129,14 +136,21 @@ impl OciImageAssembler {
self.process_whiteout_entry(&mut vfs, &entry, name, layer)
.await?;
} else {
vfs.insert_tar_entry(&entry)?;
self.process_write_entry(&mut vfs, &mut entry, layer)
let reference = vfs.insert_tar_entry(&entry)?;
self.progress
.update(|progress| {
progress.extracting_layer(&layer.digest, &reference.name);
})
.await;
size += self
.process_write_entry(&mut vfs, &mut entry, layer)
.await?;
count += 1;
}
}
self.progress
.update(|progress| {
progress.extracted_layer(&layer.digest);
progress.extracted_layer(&layer.digest, count, size);
})
.await;
}
@ -145,19 +159,27 @@ impl OciImageAssembler {
fs::remove_file(&layer.path).await?;
}
}
Ok(OciImageAssembled {
let Some(resolved) = self.resolved.take() else {
return Err(anyhow!("resolved image was not available when expected"));
};
let assembled = OciImageAssembled {
vfs: Arc::new(vfs),
digest: self.resolved.digest,
manifest: self.resolved.manifest,
descriptor: resolved.descriptor,
digest: resolved.digest,
manifest: resolved.manifest,
config: local.config,
tmp_dir: self.tmp_dir,
})
tmp_dir: self.tmp_dir.clone(),
};
self.success.store(true, Ordering::Release);
Ok(assembled)
}
async fn process_whiteout_entry(
&self,
vfs: &mut VfsTree,
entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
entry: &Entry<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>>,
name: &str,
layer: &OciImageLayer,
) -> Result<()> {
@ -198,11 +220,11 @@ impl OciImageAssembler {
async fn process_write_entry(
&self,
vfs: &mut VfsTree,
entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
entry: &mut Entry<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>>,
layer: &OciImageLayer,
) -> Result<()> {
) -> Result<u64> {
if !entry.header().entry_type().is_file() {
return Ok(());
return Ok(0);
}
trace!(
"unpack entry layer={} path={:?} type={:?}",
@ -218,7 +240,19 @@ impl OciImageAssembler {
.await?
.ok_or(anyhow!("unpack did not return a path"))?;
vfs.set_disk_path(&entry.path()?, &path)?;
Ok(())
Ok(entry.header().size()?)
}
}
impl Drop for OciImageAssembler {
fn drop(&mut self) {
if !self.success.load(Ordering::Acquire) {
if let Some(tmp_dir) = self.tmp_dir.clone() {
tokio::task::spawn(async move {
let _ = fs::remove_dir_all(tmp_dir).await;
});
}
}
}
}


@ -1,4 +1,7 @@
use crate::progress::{OciBoundProgress, OciProgressPhase};
use crate::{
progress::{OciBoundProgress, OciProgressPhase},
schema::OciSchema,
};
use super::{
name::ImageName,
@ -6,6 +9,9 @@ use super::{
};
use std::{
fmt::Debug,
io::SeekFrom,
os::unix::fs::MetadataExt,
path::{Path, PathBuf},
pin::Pin,
};
@ -14,12 +20,13 @@ use anyhow::{anyhow, Result};
use async_compression::tokio::bufread::{GzipDecoder, ZstdDecoder};
use log::debug;
use oci_spec::image::{
Descriptor, ImageConfiguration, ImageIndex, ImageManifest, MediaType, ToDockerV2S2,
Descriptor, DescriptorBuilder, ImageConfiguration, ImageIndex, ImageManifest, MediaType,
ToDockerV2S2,
};
use serde::de::DeserializeOwned;
use tokio::{
fs::File,
io::{AsyncRead, AsyncReadExt, BufReader, BufWriter},
fs::{self, File},
io::{AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader, BufWriter},
};
use tokio_stream::StreamExt;
use tokio_tar::Archive;
@ -39,16 +46,43 @@ pub enum OciImageLayerCompression {
#[derive(Clone, Debug)]
pub struct OciImageLayer {
pub metadata: Descriptor,
pub path: PathBuf,
pub digest: String,
pub compression: OciImageLayerCompression,
}
#[async_trait::async_trait]
pub trait OciImageLayerReader: AsyncRead + Sync {
async fn position(&mut self) -> Result<u64>;
}
#[async_trait::async_trait]
impl OciImageLayerReader for BufReader<File> {
async fn position(&mut self) -> Result<u64> {
Ok(self.seek(SeekFrom::Current(0)).await?)
}
}
#[async_trait::async_trait]
impl OciImageLayerReader for GzipDecoder<BufReader<File>> {
async fn position(&mut self) -> Result<u64> {
self.get_mut().position().await
}
}
#[async_trait::async_trait]
impl OciImageLayerReader for ZstdDecoder<BufReader<File>> {
async fn position(&mut self) -> Result<u64> {
self.get_mut().position().await
}
}
impl OciImageLayer {
pub async fn decompress(&self) -> Result<Pin<Box<dyn AsyncRead + Send>>> {
pub async fn decompress(&self) -> Result<Pin<Box<dyn OciImageLayerReader + Send>>> {
let file = File::open(&self.path).await?;
let reader = BufReader::new(file);
let reader: Pin<Box<dyn AsyncRead + Send>> = match self.compression {
let reader: Pin<Box<dyn OciImageLayerReader + Send>> = match self.compression {
OciImageLayerCompression::None => Box::pin(reader),
OciImageLayerCompression::Gzip => Box::pin(GzipDecoder::new(reader)),
OciImageLayerCompression::Zstd => Box::pin(ZstdDecoder::new(reader)),
@ -56,7 +90,7 @@ impl OciImageLayer {
Ok(reader)
}
pub async fn archive(&self) -> Result<Archive<Pin<Box<dyn AsyncRead + Send>>>> {
pub async fn archive(&self) -> Result<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>> {
let decompress = self.decompress().await?;
Ok(Archive::new(decompress))
}
@ -66,13 +100,14 @@ impl OciImageLayer {
pub struct OciResolvedImage {
pub name: ImageName,
pub digest: String,
pub manifest: ImageManifest,
pub descriptor: Descriptor,
pub manifest: OciSchema<ImageManifest>,
}
#[derive(Clone, Debug)]
pub struct OciLocalImage {
pub image: OciResolvedImage,
pub config: ImageConfiguration,
pub config: OciSchema<ImageConfiguration>,
pub layers: Vec<OciImageLayer>,
}
@ -89,10 +124,10 @@ impl OciImageFetcher {
}
}
async fn load_seed_json_blob<T: DeserializeOwned>(
async fn load_seed_json_blob<T: Clone + Debug + DeserializeOwned>(
&self,
descriptor: &Descriptor,
) -> Result<Option<T>> {
) -> Result<Option<OciSchema<T>>> {
let digest = descriptor.digest();
let Some((digest_type, digest_content)) = digest.split_once(':') else {
return Err(anyhow!("digest content was not properly formatted"));
@ -101,7 +136,10 @@ impl OciImageFetcher {
self.load_seed_json(&want).await
}
async fn load_seed_json<T: DeserializeOwned>(&self, want: &str) -> Result<Option<T>> {
async fn load_seed_json<T: Clone + Debug + DeserializeOwned>(
&self,
want: &str,
) -> Result<Option<OciSchema<T>>> {
let Some(ref seed) = self.seed else {
return Ok(None);
};
@ -113,10 +151,10 @@ impl OciImageFetcher {
let mut entry = entry?;
let path = String::from_utf8(entry.path_bytes().to_vec())?;
if path == want {
let mut content = String::new();
entry.read_to_string(&mut content).await?;
let data = serde_json::from_str::<T>(&content)?;
return Ok(Some(data));
let mut content = Vec::new();
entry.read_to_end(&mut content).await?;
let item = serde_json::from_slice::<T>(&content)?;
return Ok(Some(OciSchema::new(content, item)));
}
}
Ok(None)
@ -154,7 +192,7 @@ impl OciImageFetcher {
if let Some(index) = self.load_seed_json::<ImageIndex>("index.json").await? {
let mut found: Option<&Descriptor> = None;
for manifest in index.manifests() {
for manifest in index.item().manifests() {
let Some(annotations) = manifest.annotations() else {
continue;
};
@ -179,6 +217,13 @@ impl OciImageFetcher {
continue;
}
}
if let Some(ref digest) = image.digest {
if digest != manifest.digest() {
continue;
}
}
found = Some(manifest);
break;
}
@ -192,6 +237,7 @@ impl OciImageFetcher {
);
return Ok(OciResolvedImage {
name: image,
descriptor: found.clone(),
digest: found.digest().clone(),
manifest,
});
@ -200,11 +246,20 @@ impl OciImageFetcher {
}
let mut client = OciRegistryClient::new(image.registry_url()?, self.platform.clone())?;
let (manifest, digest) = client
.get_manifest_with_digest(&image.name, &image.reference)
let (manifest, descriptor, digest) = client
.get_manifest_with_digest(&image.name, image.reference.as_ref(), image.digest.as_ref())
.await?;
let descriptor = descriptor.unwrap_or_else(|| {
DescriptorBuilder::default()
.media_type(MediaType::ImageManifest)
.size(manifest.raw().len() as i64)
.digest(digest.clone())
.build()
.unwrap()
});
Ok(OciResolvedImage {
name: image,
descriptor,
digest,
manifest,
})
@ -212,41 +267,44 @@ impl OciImageFetcher {
pub async fn download(
&self,
image: OciResolvedImage,
image: &OciResolvedImage,
layer_dir: &Path,
) -> Result<OciLocalImage> {
let config: ImageConfiguration;
let config: OciSchema<ImageConfiguration>;
self.progress
.update(|progress| {
progress.phase = OciProgressPhase::ConfigAcquire;
progress.phase = OciProgressPhase::ConfigDownload;
})
.await;
let mut client = OciRegistryClient::new(image.name.registry_url()?, self.platform.clone())?;
if let Some(seeded) = self
.load_seed_json_blob::<ImageConfiguration>(image.manifest.config())
.load_seed_json_blob::<ImageConfiguration>(image.manifest.item().config())
.await?
{
config = seeded;
} else {
let config_bytes = client
.get_blob(&image.name.name, image.manifest.config())
.get_blob(&image.name.name, image.manifest.item().config())
.await?;
config = serde_json::from_slice(&config_bytes)?;
config = OciSchema::new(
config_bytes.to_vec(),
serde_json::from_slice(&config_bytes)?,
);
}
self.progress
.update(|progress| {
progress.phase = OciProgressPhase::LayerAcquire;
progress.phase = OciProgressPhase::LayerDownload;
for layer in image.manifest.layers() {
progress.add_layer(layer.digest(), layer.size() as usize);
for layer in image.manifest.item().layers() {
progress.add_layer(layer.digest());
}
})
.await;
let mut layers = Vec::new();
for layer in image.manifest.layers() {
for layer in image.manifest.item().layers() {
self.progress
.update(|progress| {
progress.downloading_layer(layer.digest(), 0, layer.size() as usize);
progress.downloading_layer(layer.digest(), 0, layer.size() as u64);
})
.await;
layers.push(
@ -255,12 +313,12 @@ impl OciImageFetcher {
);
self.progress
.update(|progress| {
progress.downloaded_layer(layer.digest());
progress.downloaded_layer(layer.digest(), layer.size() as u64);
})
.await;
}
Ok(OciLocalImage {
image,
image: image.clone(),
config,
layers,
})
@ -294,6 +352,12 @@ impl OciImageFetcher {
}
}
let metadata = fs::metadata(&layer_path).await?;
if layer.size() as u64 != metadata.size() {
return Err(anyhow!("layer size differs from size in manifest",));
}
let mut media_type = layer.media_type().clone();
// docker layer compatibility
@ -308,6 +372,7 @@ impl OciImageFetcher {
other => return Err(anyhow!("found layer with unknown media type: {}", other)),
};
Ok(OciImageLayer {
metadata: layer.clone(),
path: layer_path,
digest: layer.digest().clone(),
compression,


@ -4,4 +4,5 @@ pub mod name;
pub mod packer;
pub mod progress;
pub mod registry;
pub mod schema;
pub mod vfs;


@ -2,27 +2,39 @@ use anyhow::Result;
use std::fmt;
use url::Url;
const DOCKER_HUB_MIRROR: &str = "mirror.gcr.io";
const DEFAULT_IMAGE_TAG: &str = "latest";
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ImageName {
pub hostname: String,
pub port: Option<u16>,
pub name: String,
pub reference: String,
pub reference: Option<String>,
pub digest: Option<String>,
}
impl fmt::Display for ImageName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(port) = self.port {
write!(
f,
"{}:{}/{}:{}",
self.hostname, port, self.name, self.reference
)
let mut suffix = String::new();
if let Some(ref reference) = self.reference {
suffix.push(':');
suffix.push_str(reference);
}
if let Some(ref digest) = self.digest {
suffix.push('@');
suffix.push_str(digest);
}
if ImageName::DOCKER_HUB_MIRROR == self.hostname && self.port.is_none() {
if self.name.starts_with("library/") {
write!(f, "{}{}", &self.name[8..], suffix)
} else {
write!(f, "{}{}", self.name, suffix)
}
} else if let Some(port) = self.port {
write!(f, "{}:{}/{}{}", self.hostname, port, self.name, suffix)
} else {
write!(f, "{}/{}:{}", self.hostname, self.name, self.reference)
write!(f, "{}/{}{}", self.hostname, self.name, suffix)
}
}
}
@ -35,13 +47,21 @@ impl Default for ImageName {
}
impl ImageName {
pub const DOCKER_HUB_MIRROR: &'static str = "registry.docker.io";
pub const DEFAULT_IMAGE_TAG: &'static str = "latest";
pub fn parse(name: &str) -> Result<Self> {
let full_name = name.to_string();
let name = full_name.clone();
let (mut hostname, mut name) = name
.split_once('/')
.map(|x| (x.0.to_string(), x.1.to_string()))
.unwrap_or_else(|| (DOCKER_HUB_MIRROR.to_string(), format!("library/{}", name)));
.unwrap_or_else(|| {
(
ImageName::DOCKER_HUB_MIRROR.to_string(),
format!("library/{}", name),
)
});
// heuristic to find any docker hub image formats
// that may be in the hostname format. for example:
@ -49,7 +69,7 @@ impl ImageName {
// and neither will abc/hello/xyz:latest
if !hostname.contains('.') && full_name.chars().filter(|x| *x == '/').count() == 1 {
name = format!("{}/{}", hostname, name);
hostname = DOCKER_HUB_MIRROR.to_string();
hostname = ImageName::DOCKER_HUB_MIRROR.to_string();
}
let (hostname, port) = if let Some((hostname, port)) = hostname
@ -60,15 +80,54 @@ impl ImageName {
} else {
(hostname, None)
};
let (name, reference) = name
.split_once(':')
.map(|x| (x.0.to_string(), x.1.to_string()))
.unwrap_or((name.to_string(), DEFAULT_IMAGE_TAG.to_string()));
let name_has_digest = if name.contains('@') {
let digest_start = name.chars().position(|c| c == '@');
let ref_start = name.chars().position(|c| c == ':');
if let (Some(digest_start), Some(ref_start)) = (digest_start, ref_start) {
digest_start < ref_start
} else {
true
}
} else {
false
};
let (name, digest) = if name_has_digest {
name.split_once('@')
.map(|(name, digest)| (name.to_string(), Some(digest.to_string())))
.unwrap_or_else(|| (name, None))
} else {
(name, None)
};
let (name, reference) = if name.contains(':') {
name.split_once(':')
.map(|(name, reference)| (name.to_string(), Some(reference.to_string())))
.unwrap_or((name, None))
} else {
(name, None)
};
let (reference, digest) = if let Some(reference) = reference {
if let Some(digest) = digest {
(Some(reference), Some(digest))
} else {
reference
.split_once('@')
.map(|(reff, digest)| (Some(reff.to_string()), Some(digest.to_string())))
.unwrap_or_else(|| (Some(reference), None))
}
} else {
(None, digest)
};
Ok(ImageName {
hostname,
port,
name,
reference,
digest,
})
}
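
Illustrative expectations for the parsing rules above (a hypothetical check, not taken from the repository's tests; the ghcr.io name and digest are placeholders):

fn parse_examples() -> anyhow::Result<()> {
    // Bare names default to the Docker Hub mirror with a library/ prefix and no tag.
    let plain = ImageName::parse("alpine")?;
    assert_eq!(plain.hostname, "registry.docker.io");
    assert_eq!(plain.name, "library/alpine");
    assert_eq!(plain.reference, None);
    assert_eq!(plain.digest, None);

    // A digest pins the image; no tag is recorded in that case.
    let pinned = ImageName::parse("ghcr.io/example/app@sha256:0123abcd")?;
    assert_eq!(pinned.hostname, "ghcr.io");
    assert_eq!(pinned.name, "example/app");
    assert_eq!(pinned.reference, None);
    assert_eq!(pinned.digest.as_deref(), Some("sha256:0123abcd"));
    Ok(())
}
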


@ -1,18 +1,22 @@
use std::{path::Path, process::Stdio, sync::Arc};
use std::{os::unix::fs::MetadataExt, path::Path, process::Stdio, sync::Arc};
use super::OciPackedFormat;
use crate::{
progress::{OciBoundProgress, OciProgressPhase},
vfs::VfsTree,
};
use crate::{progress::OciBoundProgress, vfs::VfsTree};
use anyhow::{anyhow, Result};
use log::warn;
use tokio::{pin, process::Command, select};
use tokio::{
fs::{self, File},
io::BufWriter,
pin,
process::{Child, Command},
select,
};
#[derive(Debug, Clone, Copy)]
pub enum OciPackerBackendType {
MkSquashfs,
MkfsErofs,
Tar,
}
impl OciPackerBackendType {
@ -20,6 +24,7 @@ impl OciPackerBackendType {
match self {
OciPackerBackendType::MkSquashfs => OciPackedFormat::Squashfs,
OciPackerBackendType::MkfsErofs => OciPackedFormat::Erofs,
OciPackerBackendType::Tar => OciPackedFormat::Tar,
}
}
@ -31,6 +36,7 @@ impl OciPackerBackendType {
OciPackerBackendType::MkfsErofs => {
Box::new(OciPackerMkfsErofs {}) as Box<dyn OciPackerBackend>
}
OciPackerBackendType::Tar => Box::new(OciPackerTar {}) as Box<dyn OciPackerBackend>,
}
}
}
@ -47,13 +53,11 @@ impl OciPackerBackend for OciPackerMkSquashfs {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
progress.start_packing();
})
.await;
let mut child = Command::new("mksquashfs")
let child = Command::new("mksquashfs")
.arg("-")
.arg(file)
.arg("-comp")
@ -63,7 +67,9 @@ impl OciPackerBackend for OciPackerMkSquashfs {
.stderr(Stdio::null())
.stdout(Stdio::null())
.spawn()?;
let mut child = ChildProcessKillGuard(child);
let stdin = child
.0
.stdin
.take()
.ok_or(anyhow!("unable to acquire stdin stream"))?;
@ -74,7 +80,7 @@ impl OciPackerBackend for OciPackerMkSquashfs {
}
Ok(())
}));
let wait = child.wait();
let wait = child.0.wait();
pin!(wait);
let status_result = loop {
if let Some(inner) = writer.as_mut() {
@ -110,12 +116,9 @@ impl OciPackerBackend for OciPackerMkSquashfs {
status.code().unwrap()
))
} else {
let metadata = fs::metadata(&file).await?;
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
})
.update(|progress| progress.complete(metadata.size()))
.await;
Ok(())
}
@ -126,32 +129,32 @@ pub struct OciPackerMkfsErofs {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerMkfsErofs {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, path: &Path) -> Result<()> {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
progress.start_packing();
})
.await;
let mut child = Command::new("mkfs.erofs")
let child = Command::new("mkfs.erofs")
.arg("-L")
.arg("root")
.arg("--tar=-")
.arg(path)
.arg(file)
.stdin(Stdio::piped())
.stderr(Stdio::null())
.stdout(Stdio::null())
.spawn()?;
let mut child = ChildProcessKillGuard(child);
let stdin = child
.0
.stdin
.take()
.ok_or(anyhow!("unable to acquire stdin stream"))?;
let mut writer = Some(tokio::task::spawn(
async move { vfs.write_to_tar(stdin).await },
));
let wait = child.wait();
let wait = child.0.wait();
pin!(wait);
let status_result = loop {
if let Some(inner) = writer.as_mut() {
@ -188,14 +191,46 @@ impl OciPackerBackend for OciPackerMkfsErofs {
status.code().unwrap()
))
} else {
let metadata = fs::metadata(&file).await?;
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
progress.complete(metadata.size());
})
.await;
Ok(())
}
}
}
pub struct OciPackerTar {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerTar {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
progress
.update(|progress| {
progress.start_packing();
})
.await;
let output = File::create(file).await?;
let output = BufWriter::new(output);
vfs.write_to_tar(output).await?;
let metadata = fs::metadata(file).await?;
progress
.update(|progress| {
progress.complete(metadata.size());
})
.await;
Ok(())
}
}
struct ChildProcessKillGuard(Child);
impl Drop for ChildProcessKillGuard {
fn drop(&mut self) {
let _ = self.0.start_kill();
}
}


@ -1,66 +1,121 @@
use crate::packer::{OciImagePacked, OciPackedFormat};
use crate::{
name::ImageName,
packer::{OciPackedFormat, OciPackedImage},
schema::OciSchema,
};
use anyhow::Result;
use log::debug;
use oci_spec::image::{ImageConfiguration, ImageManifest};
use std::path::{Path, PathBuf};
use tokio::fs;
use log::{debug, error};
use oci_spec::image::{
Descriptor, ImageConfiguration, ImageIndex, ImageIndexBuilder, ImageManifest, MediaType,
ANNOTATION_REF_NAME,
};
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use tokio::{fs, sync::RwLock};
#[derive(Clone)]
pub struct OciPackerCache {
cache_dir: PathBuf,
index: Arc<RwLock<ImageIndex>>,
}
const ANNOTATION_IMAGE_NAME: &str = "io.containerd.image.name";
const ANNOTATION_OCI_PACKER_FORMAT: &str = "dev.krata.oci.packer.format";
impl OciPackerCache {
pub fn new(cache_dir: &Path) -> Result<OciPackerCache> {
Ok(OciPackerCache {
pub async fn new(cache_dir: &Path) -> Result<OciPackerCache> {
let index = ImageIndexBuilder::default()
.schema_version(2u32)
.media_type(MediaType::ImageIndex)
.manifests(Vec::new())
.build()?;
let cache = OciPackerCache {
cache_dir: cache_dir.to_path_buf(),
})
index: Arc::new(RwLock::new(index)),
};
{
let mut mutex = cache.index.write().await;
*mutex = cache.load_index().await?;
}
Ok(cache)
}
pub async fn list(&self) -> Result<Vec<Descriptor>> {
let index = self.index.read().await;
Ok(index.manifests().clone())
}
pub async fn recall(
&self,
name: ImageName,
digest: &str,
format: OciPackedFormat,
) -> Result<Option<OciImagePacked>> {
) -> Result<Option<OciPackedImage>> {
let index = self.index.read().await;
let mut descriptor: Option<Descriptor> = None;
for manifest in index.manifests() {
if manifest.digest() == digest
&& manifest
.annotations()
.as_ref()
.and_then(|x| x.get(ANNOTATION_OCI_PACKER_FORMAT))
.map(|x| x.as_str())
== Some(format.extension())
{
descriptor = Some(manifest.clone());
break;
}
}
let Some(descriptor) = descriptor else {
return Ok(None);
};
let mut fs_path = self.cache_dir.clone();
let mut config_path = self.cache_dir.clone();
let mut manifest_path = self.cache_dir.clone();
fs_path.push(format!("{}.{}", digest, format.extension()));
manifest_path.push(format!("{}.manifest.json", digest));
config_path.push(format!("{}.config.json", digest));
Ok(
if fs_path.exists() && manifest_path.exists() && config_path.exists() {
let image_metadata = fs::metadata(&fs_path).await?;
let manifest_metadata = fs::metadata(&manifest_path).await?;
let config_metadata = fs::metadata(&config_path).await?;
if image_metadata.is_file()
&& manifest_metadata.is_file()
&& config_metadata.is_file()
{
let manifest_text = fs::read_to_string(&manifest_path).await?;
let manifest: ImageManifest = serde_json::from_str(&manifest_text)?;
let config_text = fs::read_to_string(&config_path).await?;
let config: ImageConfiguration = serde_json::from_str(&config_text)?;
debug!("cache hit digest={}", digest);
Some(OciImagePacked::new(
digest.to_string(),
fs_path.clone(),
format,
config,
manifest,
))
} else {
None
}
if fs_path.exists() && manifest_path.exists() && config_path.exists() {
let image_metadata = fs::metadata(&fs_path).await?;
let manifest_metadata = fs::metadata(&manifest_path).await?;
let config_metadata = fs::metadata(&config_path).await?;
if image_metadata.is_file() && manifest_metadata.is_file() && config_metadata.is_file()
{
let manifest_bytes = fs::read(&manifest_path).await?;
let manifest: ImageManifest = serde_json::from_slice(&manifest_bytes)?;
let config_bytes = fs::read(&config_path).await?;
let config: ImageConfiguration = serde_json::from_slice(&config_bytes)?;
debug!("cache hit digest={}", digest);
Ok(Some(OciPackedImage::new(
name,
digest.to_string(),
fs_path.clone(),
format,
descriptor,
OciSchema::new(config_bytes, config),
OciSchema::new(manifest_bytes, manifest),
)))
} else {
debug!("cache miss digest={}", digest);
None
},
)
Ok(None)
}
} else {
debug!("cache miss digest={}", digest);
Ok(None)
}
}
pub async fn store(&self, packed: OciImagePacked) -> Result<OciImagePacked> {
pub async fn store(&self, packed: OciPackedImage) -> Result<OciPackedImage> {
let mut index = self.index.write().await;
let mut manifests = index.manifests().clone();
debug!("cache store digest={}", packed.digest);
let mut fs_path = self.cache_dir.clone();
let mut manifest_path = self.cache_dir.clone();
@ -68,17 +123,98 @@ impl OciPackerCache {
fs_path.push(format!("{}.{}", packed.digest, packed.format.extension()));
manifest_path.push(format!("{}.manifest.json", packed.digest));
config_path.push(format!("{}.config.json", packed.digest));
fs::copy(&packed.path, &fs_path).await?;
let manifest_text = serde_json::to_string_pretty(&packed.manifest)?;
fs::write(&manifest_path, manifest_text).await?;
let config_text = serde_json::to_string_pretty(&packed.config)?;
fs::write(&config_path, config_text).await?;
Ok(OciImagePacked::new(
if fs::rename(&packed.path, &fs_path).await.is_err() {
fs::copy(&packed.path, &fs_path).await?;
fs::remove_file(&packed.path).await?;
}
fs::write(&config_path, packed.config.raw()).await?;
fs::write(&manifest_path, packed.manifest.raw()).await?;
manifests.retain(|item| {
if item.digest() != &packed.digest {
return true;
}
let Some(format) = item
.annotations()
.as_ref()
.and_then(|x| x.get(ANNOTATION_OCI_PACKER_FORMAT))
.map(|x| x.as_str())
else {
return true;
};
if format != packed.format.extension() {
return true;
}
false
});
let mut descriptor = packed.descriptor.clone();
let mut annotations = descriptor.annotations().clone().unwrap_or_default();
annotations.insert(
ANNOTATION_OCI_PACKER_FORMAT.to_string(),
packed.format.extension().to_string(),
);
let image_name = packed.name.to_string();
annotations.insert(ANNOTATION_IMAGE_NAME.to_string(), image_name);
let image_ref = packed.name.reference.clone();
if let Some(image_ref) = image_ref {
annotations.insert(ANNOTATION_REF_NAME.to_string(), image_ref);
}
descriptor.set_annotations(Some(annotations));
manifests.push(descriptor.clone());
index.set_manifests(manifests);
self.save_index(&index).await?;
let packed = OciPackedImage::new(
packed.name,
packed.digest,
fs_path.clone(),
packed.format,
descriptor,
packed.config,
packed.manifest,
))
);
Ok(packed)
}
async fn save_empty_index(&self) -> Result<ImageIndex> {
let index = ImageIndexBuilder::default()
.schema_version(2u32)
.media_type(MediaType::ImageIndex)
.manifests(Vec::new())
.build()?;
self.save_index(&index).await?;
Ok(index)
}
async fn load_index(&self) -> Result<ImageIndex> {
let mut index_path = self.cache_dir.clone();
index_path.push("index.json");
if !index_path.exists() {
self.save_empty_index().await?;
}
let content = fs::read_to_string(&index_path).await?;
let index = match serde_json::from_str::<ImageIndex>(&content) {
Ok(index) => index,
Err(error) => {
error!("image index was corrupted, creating a new one: {}", error);
self.save_empty_index().await?
}
};
Ok(index)
}
async fn save_index(&self, index: &ImageIndex) -> Result<()> {
let mut encoded = serde_json::to_string_pretty(index)?;
encoded.push('\n');
let mut index_path = self.cache_dir.clone();
index_path.push("index.json");
fs::write(&index_path, encoded).await?;
Ok(())
}
}


@ -1,17 +1,20 @@
use std::path::PathBuf;
use crate::{name::ImageName, schema::OciSchema};
use self::backend::OciPackerBackendType;
use oci_spec::image::{ImageConfiguration, ImageManifest};
use oci_spec::image::{Descriptor, ImageConfiguration, ImageManifest};
pub mod backend;
pub mod cache;
pub mod service;
#[derive(Debug, Default, Clone, Copy)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash)]
pub enum OciPackedFormat {
#[default]
Squashfs,
Erofs,
Tar,
}
impl OciPackedFormat {
@ -19,6 +22,7 @@ impl OciPackedFormat {
match self {
OciPackedFormat::Squashfs => "squashfs",
OciPackedFormat::Erofs => "erofs",
OciPackedFormat::Tar => "tar",
}
}
@ -26,31 +30,38 @@ impl OciPackedFormat {
match self {
OciPackedFormat::Squashfs => OciPackerBackendType::MkSquashfs,
OciPackedFormat::Erofs => OciPackerBackendType::MkfsErofs,
OciPackedFormat::Tar => OciPackerBackendType::Tar,
}
}
}
#[derive(Clone)]
pub struct OciImagePacked {
pub struct OciPackedImage {
pub name: ImageName,
pub digest: String,
pub path: PathBuf,
pub format: OciPackedFormat,
pub config: ImageConfiguration,
pub manifest: ImageManifest,
pub descriptor: Descriptor,
pub config: OciSchema<ImageConfiguration>,
pub manifest: OciSchema<ImageManifest>,
}
impl OciImagePacked {
impl OciPackedImage {
pub fn new(
name: ImageName,
digest: String,
path: PathBuf,
format: OciPackedFormat,
config: ImageConfiguration,
manifest: ImageManifest,
) -> OciImagePacked {
OciImagePacked {
descriptor: Descriptor,
config: OciSchema<ImageConfiguration>,
manifest: OciSchema<ImageManifest>,
) -> OciPackedImage {
OciPackedImage {
name,
digest,
path,
format,
descriptor,
config,
manifest,
}


@ -1,58 +1,201 @@
use std::path::{Path, PathBuf};
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Display,
path::{Path, PathBuf},
sync::Arc,
};
use anyhow::{anyhow, Result};
use oci_spec::image::Descriptor;
use tokio::{
sync::{watch, Mutex},
task::JoinHandle,
};
use crate::{
assemble::OciImageAssembler,
fetch::OciImageFetcher,
fetch::{OciImageFetcher, OciResolvedImage},
name::ImageName,
progress::{OciBoundProgress, OciProgress, OciProgressContext},
registry::OciPlatform,
};
use super::{cache::OciPackerCache, OciImagePacked, OciPackedFormat};
use log::{error, info, warn};
use super::{cache::OciPackerCache, OciPackedFormat, OciPackedImage};
pub struct OciPackerTask {
progress: OciBoundProgress,
watch: watch::Sender<Option<Result<OciPackedImage>>>,
task: JoinHandle<()>,
}
#[derive(Clone)]
pub struct OciPackerService {
seed: Option<PathBuf>,
platform: OciPlatform,
cache: OciPackerCache,
tasks: Arc<Mutex<HashMap<OciPackerTaskKey, OciPackerTask>>>,
}
impl OciPackerService {
pub fn new(
pub async fn new(
seed: Option<PathBuf>,
cache_dir: &Path,
platform: OciPlatform,
) -> Result<OciPackerService> {
Ok(OciPackerService {
seed,
cache: OciPackerCache::new(cache_dir)?,
cache: OciPackerCache::new(cache_dir).await?,
platform,
tasks: Arc::new(Mutex::new(HashMap::new())),
})
}
pub async fn list(&self) -> Result<Vec<Descriptor>> {
self.cache.list().await
}
pub async fn recall(
&self,
digest: &str,
format: OciPackedFormat,
) -> Result<Option<OciImagePacked>> {
self.cache.recall(digest, format).await
) -> Result<Option<OciPackedImage>> {
if digest.contains('/') || digest.contains('\\') || digest.contains("..") {
return Ok(None);
}
self.cache
.recall(ImageName::parse("cached:latest")?, digest, format)
.await
}
pub async fn request(
&self,
name: ImageName,
format: OciPackedFormat,
overwrite: bool,
progress_context: OciProgressContext,
) -> Result<OciImagePacked> {
) -> Result<OciPackedImage> {
let progress = OciProgress::new();
let progress = OciBoundProgress::new(progress_context.clone(), progress);
let fetcher =
OciImageFetcher::new(self.seed.clone(), self.platform.clone(), progress.clone());
let resolved = fetcher.resolve(name).await?;
if let Some(cached) = self.cache.recall(&resolved.digest, format).await? {
return Ok(cached);
let resolved = fetcher.resolve(name.clone()).await?;
let key = OciPackerTaskKey {
digest: resolved.digest.clone(),
format,
};
let (progress_copy_task, mut receiver) = match self.tasks.lock().await.entry(key.clone()) {
Entry::Occupied(entry) => {
let entry = entry.get();
(
Some(entry.progress.also_update(progress_context).await),
entry.watch.subscribe(),
)
}
Entry::Vacant(entry) => {
let task = self
.clone()
.launch(
name,
key.clone(),
format,
overwrite,
resolved,
fetcher,
progress.clone(),
)
.await;
let (watch, receiver) = watch::channel(None);
let task = OciPackerTask {
progress: progress.clone(),
task,
watch,
};
entry.insert(task);
(None, receiver)
}
};
let _progress_task_guard = scopeguard::guard(progress_copy_task, |task| {
if let Some(task) = task {
task.abort();
}
});
let _task_cancel_guard = scopeguard::guard(self.clone(), |service| {
service.maybe_cancel_task(key);
});
loop {
receiver.changed().await?;
let current = receiver.borrow_and_update();
if current.is_some() {
return current
.as_ref()
.map(|x| x.as_ref().map_err(|err| anyhow!("{}", err)).cloned())
.unwrap();
}
}
}
#[allow(clippy::too_many_arguments)]
async fn launch(
self,
name: ImageName,
key: OciPackerTaskKey,
format: OciPackedFormat,
overwrite: bool,
resolved: OciResolvedImage,
fetcher: OciImageFetcher,
progress: OciBoundProgress,
) -> JoinHandle<()> {
info!("started packer task {}", key);
tokio::task::spawn(async move {
let _task_drop_guard =
scopeguard::guard((key.clone(), self.clone()), |(key, service)| {
service.ensure_task_gone(key);
});
if let Err(error) = self
.task(
name,
key.clone(),
format,
overwrite,
resolved,
fetcher,
progress,
)
.await
{
self.finish(&key, Err(error)).await;
}
})
}
#[allow(clippy::too_many_arguments)]
async fn task(
&self,
name: ImageName,
key: OciPackerTaskKey,
format: OciPackedFormat,
overwrite: bool,
resolved: OciResolvedImage,
fetcher: OciImageFetcher,
progress: OciBoundProgress,
) -> Result<()> {
if !overwrite {
if let Some(cached) = self
.cache
.recall(name.clone(), &resolved.digest, format)
.await?
{
self.finish(&key, Ok(cached)).await;
return Ok(());
}
}
let assembler =
OciImageAssembler::new(fetcher, resolved, progress.clone(), None, None).await?;
@ -67,15 +210,69 @@ impl OciPackerService {
packer
.pack(progress, assembled.vfs.clone(), &target)
.await?;
let packed = OciImagePacked::new(
let packed = OciPackedImage::new(
name,
assembled.digest.clone(),
file,
format,
assembled.descriptor.clone(),
assembled.config.clone(),
assembled.manifest.clone(),
);
let packed = self.cache.store(packed).await?;
Ok(packed)
self.finish(&key, Ok(packed)).await;
Ok(())
}
async fn finish(&self, key: &OciPackerTaskKey, result: Result<OciPackedImage>) {
let Some(task) = self.tasks.lock().await.remove(key) else {
error!("packer task {} was not found when task completed", key);
return;
};
match result.as_ref() {
Ok(_) => {
info!("completed packer task {}", key);
}
Err(err) => {
warn!("packer task {} failed: {}", key, err);
}
}
task.watch.send_replace(Some(result));
}
fn maybe_cancel_task(self, key: OciPackerTaskKey) {
tokio::task::spawn(async move {
let tasks = self.tasks.lock().await;
if let Some(task) = tasks.get(&key) {
if task.watch.is_closed() {
task.task.abort();
}
}
});
}
fn ensure_task_gone(self, key: OciPackerTaskKey) {
tokio::task::spawn(async move {
let mut tasks = self.tasks.lock().await;
if let Some(task) = tasks.remove(&key) {
warn!("aborted packer task {}", key);
task.watch.send_replace(Some(Err(anyhow!("task aborted"))));
}
});
}
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
struct OciPackerTaskKey {
digest: String,
format: OciPackedFormat,
}
impl Display for OciPackerTaskKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("{}:{}", self.digest, self.format.extension()))
}
}
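
A sketch of the deduplication the task map above provides: two concurrent requests for the same digest and format join a single packer task and both resolve with the same artifact (image name and format are placeholders):

async fn pack_twice(service: &OciPackerService) -> anyhow::Result<()> {
    let name = ImageName::parse("alpine:latest")?;
    let (ctx_a, _rx_a) = OciProgressContext::create();
    let (ctx_b, _rx_b) = OciProgressContext::create();
    let a = service.request(name.clone(), OciPackedFormat::Squashfs, false, ctx_a);
    let b = service.request(name, OciPackedFormat::Squashfs, false, ctx_b);
    let (a, b) = tokio::join!(a, b);
    assert_eq!(a?.digest, b?.digest);
    Ok(())
}
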


@ -1,14 +1,16 @@
use std::sync::Arc;
use indexmap::IndexMap;
use tokio::sync::{mpsc::Sender, Mutex};
use std::sync::Arc;
use tokio::{
sync::{watch, Mutex},
task::JoinHandle,
};
#[derive(Clone, Debug)]
pub struct OciProgress {
pub phase: OciProgressPhase,
pub digest: Option<String>,
pub layers: IndexMap<String, OciProgressLayer>,
pub value: u64,
pub total: u64,
pub indication: OciProgressIndication,
}
impl Default for OciProgress {
@ -20,72 +22,146 @@ impl Default for OciProgress {
impl OciProgress {
pub fn new() -> Self {
OciProgress {
phase: OciProgressPhase::Resolving,
phase: OciProgressPhase::Started,
digest: None,
layers: IndexMap::new(),
value: 0,
total: 1,
indication: OciProgressIndication::Hidden,
}
}
pub fn add_layer(&mut self, id: &str, size: usize) {
pub fn start_resolving(&mut self) {
self.phase = OciProgressPhase::Resolving;
self.indication = OciProgressIndication::Spinner { message: None };
}
pub fn resolved(&mut self, digest: &str) {
self.digest = Some(digest.to_string());
self.indication = OciProgressIndication::Hidden;
}
pub fn add_layer(&mut self, id: &str) {
self.layers.insert(
id.to_string(),
OciProgressLayer {
id: id.to_string(),
phase: OciProgressLayerPhase::Waiting,
value: 0,
total: size as u64,
indication: OciProgressIndication::Spinner { message: None },
},
);
}
pub fn downloading_layer(&mut self, id: &str, downloaded: usize, total: usize) {
pub fn downloading_layer(&mut self, id: &str, downloaded: u64, total: u64) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Downloading;
entry.value = downloaded as u64;
entry.total = total as u64;
entry.indication = OciProgressIndication::ProgressBar {
message: None,
current: downloaded,
total,
bytes: true,
};
}
}
pub fn downloaded_layer(&mut self, id: &str) {
pub fn downloaded_layer(&mut self, id: &str, total: u64) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Downloaded;
entry.value = entry.total;
entry.indication = OciProgressIndication::Completed {
message: None,
total: Some(total),
bytes: true,
};
}
}
pub fn extracting_layer(&mut self, id: &str, extracted: usize, total: usize) {
pub fn start_assemble(&mut self) {
self.phase = OciProgressPhase::Assemble;
self.indication = OciProgressIndication::Hidden;
}
pub fn start_extracting_layer(&mut self, id: &str) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Extracting;
entry.value = extracted as u64;
entry.total = total as u64;
entry.indication = OciProgressIndication::Spinner { message: None };
}
}
pub fn extracted_layer(&mut self, id: &str) {
pub fn extracting_layer(&mut self, id: &str, file: &str) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Extracting;
entry.indication = OciProgressIndication::Spinner {
message: Some(file.to_string()),
};
}
}
pub fn extracted_layer(&mut self, id: &str, count: u64, total_size: u64) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Extracted;
entry.value = entry.total;
entry.indication = OciProgressIndication::Completed {
message: Some(format!("{} files", count)),
total: Some(total_size),
bytes: true,
};
}
}
pub fn start_packing(&mut self) {
self.phase = OciProgressPhase::Pack;
for layer in self.layers.values_mut() {
layer.indication = OciProgressIndication::Hidden;
}
self.indication = OciProgressIndication::Spinner { message: None };
}
pub fn complete(&mut self, size: u64) {
self.phase = OciProgressPhase::Complete;
self.indication = OciProgressIndication::Completed {
message: None,
total: Some(size),
bytes: true,
}
}
}
#[derive(Clone, Debug)]
pub enum OciProgressPhase {
Started,
Resolving,
Resolved,
ConfigAcquire,
LayerAcquire,
Packing,
ConfigDownload,
LayerDownload,
Assemble,
Pack,
Complete,
}
#[derive(Clone, Debug)]
pub enum OciProgressIndication {
Hidden,
ProgressBar {
message: Option<String>,
current: u64,
total: u64,
bytes: bool,
},
Spinner {
message: Option<String>,
},
Completed {
message: Option<String>,
total: Option<u64>,
bytes: bool,
},
}
#[derive(Clone, Debug)]
pub struct OciProgressLayer {
pub id: String,
pub phase: OciProgressLayerPhase,
pub value: u64,
pub total: u64,
pub indication: OciProgressIndication,
}
#[derive(Clone, Debug)]
@ -99,16 +175,25 @@ pub enum OciProgressLayerPhase {
#[derive(Clone)]
pub struct OciProgressContext {
sender: Sender<OciProgress>,
sender: watch::Sender<OciProgress>,
}
impl OciProgressContext {
pub fn new(sender: Sender<OciProgress>) -> OciProgressContext {
pub fn create() -> (OciProgressContext, watch::Receiver<OciProgress>) {
let (sender, receiver) = watch::channel(OciProgress::new());
(OciProgressContext::new(sender), receiver)
}
pub fn new(sender: watch::Sender<OciProgress>) -> OciProgressContext {
OciProgressContext { sender }
}
pub fn update(&self, progress: &OciProgress) {
let _ = self.sender.try_send(progress.clone());
let _ = self.sender.send(progress.clone());
}
pub fn subscribe(&self) -> watch::Receiver<OciProgress> {
self.sender.subscribe()
}
}
@ -137,4 +222,17 @@ impl OciBoundProgress {
function(&mut progress);
self.context.update(&progress);
}
pub async fn also_update(&self, context: OciProgressContext) -> JoinHandle<()> {
let progress = self.instance.lock().await.clone();
context.update(&progress);
let mut receiver = self.context.subscribe();
tokio::task::spawn(async move {
while (receiver.changed().await).is_ok() {
context
.sender
.send_replace(receiver.borrow_and_update().clone());
}
})
}
}
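
A sketch of attaching a second observer to in-flight progress with also_update, as the packer service does when a duplicate request joins an existing task:

async fn mirror(progress: OciBoundProgress) {
    let (extra_context, mut extra_receiver) = OciProgressContext::create();
    let forwarder = progress.also_update(extra_context).await;
    while extra_receiver.changed().await.is_ok() {
        // each change carries a full snapshot of the progress state
        let snapshot = extra_receiver.borrow_and_update().clone();
        println!("phase {:?}", snapshot.phase);
    }
    forwarder.abort();
}
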


@ -7,7 +7,7 @@ use reqwest::{Client, RequestBuilder, Response, StatusCode};
use tokio::{fs::File, io::AsyncWriteExt};
use url::Url;
use crate::progress::OciBoundProgress;
use crate::{name::ImageName, progress::OciBoundProgress, schema::OciSchema};
#[derive(Clone, Debug)]
pub struct OciPlatform {
@ -149,24 +149,20 @@ impl OciRegistryClient {
))?;
let mut response = self.call(self.agent.get(url.as_str())).await?;
let mut size: u64 = 0;
let mut last_progress_size: u64 = 0;
while let Some(chunk) = response.chunk().await? {
dest.write_all(&chunk).await?;
size += chunk.len() as u64;
if (size - last_progress_size) > (5 * 1024 * 1024) {
last_progress_size = size;
if let Some(ref progress) = progress {
progress
.update(|progress| {
progress.downloading_layer(
descriptor.digest(),
size as usize,
descriptor.size() as usize,
);
})
.await;
}
if let Some(ref progress) = progress {
progress
.update(|progress| {
progress.downloading_layer(
descriptor.digest(),
size,
descriptor.size() as u64,
);
})
.await;
}
}
Ok(size)
@ -176,11 +172,11 @@ impl OciRegistryClient {
&mut self,
name: N,
reference: R,
) -> Result<(ImageManifest, String)> {
) -> Result<(OciSchema<ImageManifest>, String)> {
let url = self.url.join(&format!(
"/v2/{}/manifests/{}",
name.as_ref(),
reference.as_ref()
reference.as_ref(),
))?;
let accept = format!(
"{}, {}, {}, {}",
@ -198,20 +194,28 @@ impl OciRegistryClient {
.ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?
.to_str()?
.to_string();
let manifest = serde_json::from_str(&response.text().await?)?;
Ok((manifest, digest))
let bytes = response.bytes().await?;
let manifest = serde_json::from_slice(&bytes)?;
Ok((OciSchema::new(bytes.to_vec(), manifest), digest))
}
pub async fn get_manifest_with_digest<N: AsRef<str>, R: AsRef<str>>(
&mut self,
name: N,
reference: R,
) -> Result<(ImageManifest, String)> {
let url = self.url.join(&format!(
"/v2/{}/manifests/{}",
name.as_ref(),
reference.as_ref()
))?;
reference: Option<R>,
digest: Option<N>,
) -> Result<(OciSchema<ImageManifest>, Option<Descriptor>, String)> {
let what = digest
.as_ref()
.map(|x| x.as_ref().to_string())
.unwrap_or_else(|| {
reference
.map(|x| x.as_ref().to_string())
.unwrap_or_else(|| ImageName::DEFAULT_IMAGE_TAG.to_string())
});
let url = self
.url
.join(&format!("/v2/{}/manifests/{}", name.as_ref(), what,))?;
let accept = format!(
"{}, {}, {}, {}",
MediaType::ImageManifest.to_docker_v2s2()?,
@ -234,18 +238,21 @@ impl OciRegistryClient {
let descriptor = self
.pick_manifest(index)
.ok_or_else(|| anyhow!("unable to pick manifest from index"))?;
return self
let (manifest, digest) = self
.get_raw_manifest_with_digest(name, descriptor.digest())
.await;
.await?;
return Ok((manifest, Some(descriptor), digest));
}
let digest = response
.headers()
.get("Docker-Content-Digest")
.ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?
.to_str()?
.to_string();
let manifest = serde_json::from_str(&response.text().await?)?;
Ok((manifest, digest))
.and_then(|x| x.to_str().ok())
.map(|x| x.to_string())
.or_else(|| digest.map(|x: N| x.as_ref().to_string()))
.ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?;
let bytes = response.bytes().await?;
let manifest = serde_json::from_slice(&bytes)?;
Ok((OciSchema::new(bytes.to_vec(), manifest), None, digest))
}
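A hedged sketch of calling the reworked manifest lookup; the image name and tag are placeholders, and Result is assumed to be the anyhow alias this module already uses.

async fn resolve_manifest(client: &mut OciRegistryClient) -> Result<()> {
    // Either a tag or a digest may be supplied; here a tag is used.
    let (manifest, picked, digest) = client
        .get_manifest_with_digest("library/alpine", Some("latest"), None)
        .await?;
    // `picked` is Some(..) only when the reference resolved through an image index.
    println!("digest={} via-index={}", digest, picked.is_some());
    let _raw_json = manifest.raw();
    Ok(())
}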
fn pick_manifest(&mut self, index: ImageIndex) -> Option<Descriptor> {

crates/oci/src/schema.rs (new file, 29 lines added)

@ -0,0 +1,29 @@
use std::fmt::Debug;
#[derive(Clone, Debug)]
pub struct OciSchema<T: Clone + Debug> {
raw: Vec<u8>,
item: T,
}
impl<T: Clone + Debug> OciSchema<T> {
pub fn new(raw: Vec<u8>, item: T) -> OciSchema<T> {
OciSchema { raw, item }
}
pub fn raw(&self) -> &[u8] {
&self.raw
}
pub fn item(&self) -> &T {
&self.item
}
pub fn into_raw(self) -> Vec<u8> {
self.raw
}
pub fn into_item(self) -> T {
self.item
}
}
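Illustrative only: pairing parsed JSON with the exact bytes it was decoded from, the pattern the registry client above relies on. ImageManifest is assumed to come from the oci-spec crate, as elsewhere in this diff.

use oci_spec::image::ImageManifest;

fn wrap_manifest(raw: Vec<u8>) -> anyhow::Result<OciSchema<ImageManifest>> {
    // Keep the original bytes alongside the parsed value so digests and
    // re-serialization stay byte-exact.
    let manifest: ImageManifest = serde_json::from_slice(&raw)?;
    Ok(OciSchema::new(raw, manifest))
}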


@ -194,7 +194,7 @@ impl VfsTree {
}
}
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<()> {
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<&VfsNode> {
let mut meta = VfsNode::from(entry)?;
let path = entry.path()?.to_path_buf();
let parent = if let Some(parent) = path.parent() {
@ -218,8 +218,11 @@ impl VfsTree {
meta.children = old.children;
}
}
parent.children.push(meta);
Ok(())
parent.children.push(meta.clone());
let Some(reference) = parent.children.iter().find(|child| child.name == meta.name) else {
return Err(anyhow!("unable to find inserted child in vfs"));
};
Ok(reference)
}
pub fn set_disk_path(&mut self, path: &Path, disk_path: &Path) -> Result<()> {


@ -12,18 +12,18 @@ resolver = "2"
anyhow = { workspace = true }
backhand = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.10" }
krata-advmac = { workspace = true }
krata-oci = { path = "../oci", version = "^0.0.9" }
krata-oci = { path = "../oci", version = "^0.0.10" }
log = { workspace = true }
loopdev-3 = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.9" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.9" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.9" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.10" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.10" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.10" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.10" }
[lib]
name = "kratart"


@ -1,7 +1,7 @@
use anyhow::Result;
use backhand::{FilesystemWriter, NodeHeader};
use krata::launchcfg::LaunchInfo;
use krataoci::packer::OciImagePacked;
use krataoci::packer::OciPackedImage;
use log::trace;
use std::fs;
use std::fs::File;
@ -9,13 +9,13 @@ use std::path::PathBuf;
use uuid::Uuid;
pub struct ConfigBlock<'a> {
pub image: &'a OciImagePacked,
pub image: &'a OciPackedImage,
pub file: PathBuf,
pub dir: PathBuf,
}
impl ConfigBlock<'_> {
pub fn new<'a>(uuid: &Uuid, image: &'a OciImagePacked) -> Result<ConfigBlock<'a>> {
pub fn new<'a>(uuid: &Uuid, image: &'a OciPackedImage) -> Result<ConfigBlock<'a>> {
let mut dir = std::env::temp_dir().clone();
dir.push(format!("krata-cfg-{}", uuid));
fs::create_dir_all(&dir)?;
@ -26,7 +26,7 @@ impl ConfigBlock<'_> {
pub fn build(&self, launch_config: &LaunchInfo) -> Result<()> {
trace!("build launch_config={:?}", launch_config);
let manifest = self.image.config.to_string()?;
let config = self.image.config.raw();
let launch = serde_json::to_string(launch_config)?;
let mut writer = FilesystemWriter::default();
writer.push_dir(
@ -39,7 +39,7 @@ impl ConfigBlock<'_> {
},
)?;
writer.push_file(
manifest.as_bytes(),
config,
"/image/config.json",
NodeHeader {
permissions: 384,


@ -10,7 +10,7 @@ use krata::launchcfg::{
LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
LaunchPackedFormat, LaunchRoot,
};
use krataoci::packer::OciImagePacked;
use krataoci::packer::OciPackedImage;
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
@ -23,6 +23,8 @@ use super::{GuestInfo, GuestState};
pub struct GuestLaunchRequest {
pub format: LaunchPackedFormat,
pub kernel: Vec<u8>,
pub initrd: Vec<u8>,
pub uuid: Option<Uuid>,
pub name: Option<String>,
pub vcpus: u32,
@ -30,7 +32,7 @@ pub struct GuestLaunchRequest {
pub env: HashMap<String, String>,
pub run: Option<Vec<String>>,
pub debug: bool,
pub image: OciImagePacked,
pub image: OciPackedImage,
}
pub struct GuestLauncher {
@ -173,22 +175,22 @@ impl GuestLauncher {
let config = DomainConfig {
backend_domid: 0,
name: &xen_name,
name: xen_name,
max_vcpus: request.vcpus,
mem_mb: request.mem,
kernel_path: &context.kernel,
initrd_path: &context.initrd,
cmdline: &cmdline,
use_console_backend: Some("krata-console"),
kernel: request.kernel,
initrd: request.initrd,
cmdline,
use_console_backend: Some("krata-console".to_string()),
disks: vec![
DomainDisk {
vdev: "xvda",
block: &image_squashfs_loop,
vdev: "xvda".to_string(),
block: image_squashfs_loop.clone(),
writable: false,
},
DomainDisk {
vdev: "xvdb",
block: &cfgblk_squashfs_loop,
vdev: "xvdb".to_string(),
block: cfgblk_squashfs_loop.clone(),
writable: false,
},
],
@ -197,7 +199,7 @@ impl GuestLauncher {
initialized: false,
}],
vifs: vec![DomainNetworkInterface {
mac: &guest_mac_string,
mac: guest_mac_string.clone(),
mtu: 1500,
bridge: None,
script: None,


@ -1,9 +1,4 @@
use std::{
fs,
path::{Path, PathBuf},
str::FromStr,
sync::Arc,
};
use std::{fs, path::PathBuf, str::FromStr, sync::Arc};
use anyhow::{anyhow, Result};
use ipnetwork::IpNetwork;
@ -52,43 +47,17 @@ pub struct GuestInfo {
pub struct RuntimeContext {
pub autoloop: AutoLoop,
pub xen: XenClient,
pub kernel: String,
pub initrd: String,
}
impl RuntimeContext {
pub async fn new(store: String) -> Result<Self> {
let mut image_cache_path = PathBuf::from(&store);
image_cache_path.push("cache");
fs::create_dir_all(&image_cache_path)?;
pub async fn new() -> Result<Self> {
let xen = XenClient::open(0).await?;
image_cache_path.push("image");
fs::create_dir_all(&image_cache_path)?;
let kernel = RuntimeContext::detect_guest_file(&store, "kernel")?;
let initrd = RuntimeContext::detect_guest_file(&store, "initrd")?;
Ok(RuntimeContext {
autoloop: AutoLoop::new(LoopControl::open()?),
xen,
kernel,
initrd,
})
}
fn detect_guest_file(store: &str, name: &str) -> Result<String> {
let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
if path.is_file() {
return path_as_string(&path);
}
path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
if path.is_file() {
return path_as_string(&path);
}
Err(anyhow!("unable to find required guest file: {}", name))
}
pub async fn list(&self) -> Result<Vec<GuestInfo>> {
let mut guests: Vec<GuestInfo> = Vec::new();
for domid_candidate in self.xen.store.list("/local/domain").await? {
@ -248,16 +217,14 @@ impl RuntimeContext {
#[derive(Clone)]
pub struct Runtime {
store: Arc<String>,
context: RuntimeContext,
launch_semaphore: Arc<Semaphore>,
}
impl Runtime {
pub async fn new(store: String) -> Result<Self> {
let context = RuntimeContext::new(store.clone()).await?;
pub async fn new() -> Result<Self> {
let context = RuntimeContext::new().await?;
Ok(Self {
store: Arc::new(store),
context,
launch_semaphore: Arc::new(Semaphore::new(1)),
})
@ -320,12 +287,6 @@ impl Runtime {
}
pub async fn dupe(&self) -> Result<Runtime> {
Runtime::new((*self.store).clone()).await
Runtime::new().await
}
}
fn path_as_string(path: &Path) -> Result<String> {
path.to_str()
.ok_or_else(|| anyhow!("unable to convert path to string"))
.map(|x| x.to_string())
}
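A small sketch, assuming Result is the anyhow alias used in this module, of the now parameterless runtime constructor and dupe():

async fn open_runtime() -> Result<Runtime> {
    let runtime = Runtime::new().await?;
    let _copy = runtime.dupe().await?; // a second, independently opened handle
    Ok(runtime)
}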


@ -14,8 +14,8 @@ elf = { workspace = true }
flate2 = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.9" }
krata-xenstore = { path = "../xenstore", version = "^0.0.9" }
krata-xencall = { path = "../xencall", version = "^0.0.10" }
krata-xenstore = { path = "../xenstore", version = "^0.0.10" }
memchr = { workspace = true }
nix = { workspace = true }
slice-copy = { workspace = true }


@ -1,4 +1,5 @@
use std::{env, process};
use tokio::fs;
use xenclient::error::Result;
use xenclient::{DomainConfig, XenClient};
@ -16,12 +17,12 @@ async fn main() -> Result<()> {
let client = XenClient::open(0).await?;
let config = DomainConfig {
backend_domid: 0,
name: "xenclient-test",
name: "xenclient-test".to_string(),
max_vcpus: 1,
mem_mb: 512,
kernel_path: kernel_image_path.as_str(),
initrd_path: initrd_path.as_str(),
cmdline: "debug elevator=noop",
kernel: fs::read(&kernel_image_path).await?,
initrd: fs::read(&initrd_path).await?,
cmdline: "debug elevator=noop".to_string(),
use_console_backend: None,
disks: vec![],
channels: vec![],


@ -107,17 +107,15 @@ impl ElfImageLoader {
ElfImageLoader::load_xz(file.as_slice())
}
pub fn load_file_kernel(path: &str) -> Result<ElfImageLoader> {
let file = std::fs::read(path)?;
for start in find_iter(file.as_slice(), &[0x1f, 0x8b]) {
if let Ok(elf) = ElfImageLoader::load_gz(&file[start..]) {
pub fn load_file_kernel(data: &[u8]) -> Result<ElfImageLoader> {
for start in find_iter(data, &[0x1f, 0x8b]) {
if let Ok(elf) = ElfImageLoader::load_gz(&data[start..]) {
return Ok(elf);
}
}
for start in find_iter(file.as_slice(), &[0xfd, 0x37, 0x7a, 0x58]) {
if let Ok(elf) = ElfImageLoader::load_xz(&file[start..]) {
for start in find_iter(data, &[0xfd, 0x37, 0x7a, 0x58]) {
if let Ok(elf) = ElfImageLoader::load_xz(&data[start..]) {
return Ok(elf);
}
}
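A usage sketch for the new in-memory signature; the path is a placeholder, and reading the kernel into memory is now the caller's responsibility (the io-error conversion mirrors the xenclient example earlier in this diff).

async fn load_kernel(path: &str) -> Result<ElfImageLoader> {
    let data = tokio::fs::read(path).await?;
    ElfImageLoader::load_file_kernel(&data)
}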


@ -23,7 +23,6 @@ use boot::BootState;
use log::{debug, trace, warn};
use tokio::time::timeout;
use std::fs::read;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
@ -40,60 +39,60 @@ pub struct XenClient {
call: XenCall,
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct BlockDeviceRef {
pub path: String,
pub major: u32,
pub minor: u32,
}
#[derive(Debug)]
pub struct DomainDisk<'a> {
pub vdev: &'a str,
pub block: &'a BlockDeviceRef,
#[derive(Clone, Debug)]
pub struct DomainDisk {
pub vdev: String,
pub block: BlockDeviceRef,
pub writable: bool,
}
#[derive(Debug)]
pub struct DomainFilesystem<'a> {
pub path: &'a str,
pub tag: &'a str,
#[derive(Clone, Debug)]
pub struct DomainFilesystem {
pub path: String,
pub tag: String,
}
#[derive(Debug)]
pub struct DomainNetworkInterface<'a> {
pub mac: &'a str,
#[derive(Clone, Debug)]
pub struct DomainNetworkInterface {
pub mac: String,
pub mtu: u32,
pub bridge: Option<&'a str>,
pub script: Option<&'a str>,
pub bridge: Option<String>,
pub script: Option<String>,
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct DomainChannel {
pub typ: String,
pub initialized: bool,
}
#[derive(Debug)]
pub struct DomainEventChannel<'a> {
pub name: &'a str,
#[derive(Clone, Debug)]
pub struct DomainEventChannel {
pub name: String,
}
#[derive(Debug)]
pub struct DomainConfig<'a> {
#[derive(Clone, Debug)]
pub struct DomainConfig {
pub backend_domid: u32,
pub name: &'a str,
pub name: String,
pub max_vcpus: u32,
pub mem_mb: u64,
pub kernel_path: &'a str,
pub initrd_path: &'a str,
pub cmdline: &'a str,
pub disks: Vec<DomainDisk<'a>>,
pub use_console_backend: Option<&'a str>,
pub kernel: Vec<u8>,
pub initrd: Vec<u8>,
pub cmdline: String,
pub disks: Vec<DomainDisk>,
pub use_console_backend: Option<String>,
pub channels: Vec<DomainChannel>,
pub vifs: Vec<DomainNetworkInterface<'a>>,
pub filesystems: Vec<DomainFilesystem<'a>>,
pub event_channels: Vec<DomainEventChannel<'a>>,
pub vifs: Vec<DomainNetworkInterface>,
pub filesystems: Vec<DomainFilesystem>,
pub event_channels: Vec<DomainEventChannel>,
pub extra_keys: Vec<(String, String)>,
pub extra_rw_paths: Vec<String>,
}
@ -117,7 +116,7 @@ impl XenClient {
Ok(XenClient { store, call })
}
pub async fn create(&self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
pub async fn create(&self, config: &DomainConfig) -> Result<CreatedDomain> {
let mut domain = CreateDomain {
max_vcpus: config.max_vcpus,
..Default::default()
@ -143,7 +142,7 @@ impl XenClient {
&self,
domid: u32,
domain: &CreateDomain,
config: &DomainConfig<'_>,
config: &DomainConfig,
) -> Result<CreatedDomain> {
trace!(
"XenClient init domid={} domain={:?} config={:?}",
@ -237,9 +236,9 @@ impl XenClient {
&Uuid::from_bytes(domain.handle).to_string(),
)
.await?;
tx.write_string(format!("{}/name", dom_path).as_str(), config.name)
tx.write_string(format!("{}/name", dom_path).as_str(), &config.name)
.await?;
tx.write_string(format!("{}/name", vm_path).as_str(), config.name)
tx.write_string(format!("{}/name", vm_path).as_str(), &config.name)
.await?;
for (key, value) in &config.extra_keys {
@ -257,7 +256,7 @@ impl XenClient {
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
let image_loader = ElfImageLoader::load_file_kernel(config.kernel_path)?;
let image_loader = ElfImageLoader::load_file_kernel(&config.kernel)?;
let xenstore_evtchn: u32;
let xenstore_mfn: u64;
@ -270,18 +269,17 @@ impl XenClient {
let mut arch = Box::new(X86BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
#[cfg(target_arch = "aarch64")]
let mut arch = Box::new(Arm64BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
let initrd = read(config.initrd_path)?;
state = boot
.initialize(
&mut arch,
&image_loader,
initrd.as_slice(),
&config.initrd,
config.max_vcpus,
config.mem_mb,
1,
)
.await?;
boot.boot(&mut arch, &mut state, config.cmdline).await?;
boot.boot(&mut arch, &mut state, &config.cmdline).await?;
xenstore_evtchn = state.store_evtchn;
xenstore_mfn = boot.phys.p2m[state.xenstore_segment.pfn as usize];
p2m = boot.phys.p2m;
@ -291,19 +289,9 @@ impl XenClient {
let tx = self.store.transaction().await?;
tx.write_string(format!("{}/image/os_type", vm_path).as_str(), "linux")
.await?;
tx.write_string(
format!("{}/image/kernel", vm_path).as_str(),
config.kernel_path,
)
.await?;
tx.write_string(
format!("{}/image/ramdisk", vm_path).as_str(),
config.initrd_path,
)
.await?;
tx.write_string(
format!("{}/image/cmdline", vm_path).as_str(),
config.cmdline,
&config.cmdline,
)
.await?;
@ -352,7 +340,8 @@ impl XenClient {
&DomainChannel {
typ: config
.use_console_backend
.unwrap_or("xenconsoled")
.clone()
.unwrap_or("xenconsoled".to_string())
.to_string(),
initialized: true,
},
@ -429,7 +418,7 @@ impl XenClient {
.await?;
let channel_path = format!("{}/evtchn/{}", dom_path, channel.name);
self.store
.write_string(&format!("{}/name", channel_path), channel.name)
.write_string(&format!("{}/name", channel_path), &channel.name)
.await?;
self.store
.write_string(&format!("{}/channel", channel_path), &id.to_string())
@ -447,7 +436,7 @@ impl XenClient {
backend_domid: u32,
domid: u32,
index: usize,
disk: &DomainDisk<'_>,
disk: &DomainDisk,
) -> Result<()> {
let id = (202 << 8) | (index << 4) as u64;
let backend_items: Vec<(&str, String)> = vec![
@ -567,7 +556,7 @@ impl XenClient {
backend_domid: u32,
domid: u32,
index: usize,
filesystem: &DomainFilesystem<'_>,
filesystem: &DomainFilesystem,
) -> Result<()> {
let id = 90 + index as u64;
let backend_items: Vec<(&str, String)> = vec![
@ -605,7 +594,7 @@ impl XenClient {
backend_domid: u32,
domid: u32,
index: usize,
vif: &DomainNetworkInterface<'_>,
vif: &DomainNetworkInterface,
) -> Result<()> {
let id = 20 + index as u64;
let mut backend_items: Vec<(&str, String)> = vec![
@ -619,12 +608,12 @@ impl XenClient {
];
if vif.bridge.is_some() {
backend_items.extend_from_slice(&[("bridge", vif.bridge.unwrap().to_string())]);
backend_items.extend_from_slice(&[("bridge", vif.bridge.clone().unwrap())]);
}
if vif.script.is_some() {
backend_items.extend_from_slice(&[
("script", vif.script.unwrap().to_string()),
("script", vif.script.clone().unwrap()),
("hotplug-status", "".to_string()),
]);
} else {