Mirror of https://github.com/edera-dev/krata.git (synced 2025-08-03 21:21:32 +00:00)

Compare commits: 16 commits

95fbc62486
284ed8f17b
82576df7b7
1b90eedbcd
aa941c6e87
d0bf3c4c77
38e892e249
1a90372037
4754cdd128
f843abcabf
e8d89d4d5b
4e9738b959
8135307283
e450ebd2a2
79f7742caa
c3c18271b4
CHANGELOG.md (15 changed lines)

@@ -6,6 +6,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0)

 ## [Unreleased]

+## [0.0.10](https://github.com/edera-dev/krata/compare/v0.0.9...v0.0.10) - 2024-04-22
+
+### Added
+- implement guest exec ([#107](https://github.com/edera-dev/krata/pull/107))
+- implement kernel / initrd oci image support ([#103](https://github.com/edera-dev/krata/pull/103))
+- idm v2 ([#102](https://github.com/edera-dev/krata/pull/102))
+- oci concurrency improvements ([#95](https://github.com/edera-dev/krata/pull/95))
+- oci tar format, bit-perfect disk storage for config and manifest, concurrent image pulls ([#88](https://github.com/edera-dev/krata/pull/88))
+
+### Fixed
+- oci cache store should fallback to copy when rename won't work ([#96](https://github.com/edera-dev/krata/pull/96))
+
+### Other
+- update Cargo.lock dependencies
+
 ## [0.0.9](https://github.com/edera-dev/krata/compare/v0.0.8...v0.0.9) - 2024-04-15

 ### Added
Cargo.lock (generated, 74 changed lines)

All krata workspace crates are bumped from 0.0.9 to 0.0.10: krata, krata-ctl, krata-daemon,
krata-guest, krata-network, krata-oci, krata-runtime, krata-xencall, krata-xenclient,
krata-xenevtchn, krata-xengnt, and krata-xenstore. krata-ctl gains dependency entries for
"base64 0.22.0" and "serde"; krata-daemon gains "krata-tokio-tar" and "scopeguard".

External dependencies are updated (versions and checksums): ratatui 0.26.1 -> 0.26.2,
redb 2.0.0 -> 2.1.0, reqwest 0.12.3 -> 0.12.4, rustls 0.22.3 -> 0.22.4, serde and
serde_derive 1.0.197 -> 1.0.198, serde_json 1.0.115 -> 1.0.116, stability 0.1.1 -> 0.2.0
(its syn dependency moves from 1.0.109 to 2.0.57), sysinfo 0.30.10 -> 0.30.11, and
thiserror / thiserror-impl 1.0.58 -> 1.0.59.
Cargo.toml (15 changed lines)

@@ -16,7 +16,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "0.0.9"
+version = "0.0.10"
 homepage = "https://krata.dev"
 license = "Apache-2.0"
 repository = "https://github.com/edera-dev/krata"
@@ -28,6 +28,7 @@ async-compression = "0.4.8"
 async-stream = "0.3.5"
 async-trait = "0.1.80"
 backhand = "0.15.0"
+base64 = "0.22.0"
 byteorder = "1"
 bytes = "1.5.0"
 cgroups-rs = "0.3.4"
@@ -61,17 +62,17 @@ prost-build = "0.12.4"
 prost-reflect-build = "0.13.0"
 prost-types = "0.12.4"
 rand = "0.8.5"
-ratatui = "0.26.1"
+ratatui = "0.26.2"
-redb = "2.0.0"
+redb = "2.1.0"
 rtnetlink = "0.14.1"
 scopeguard = "1.2.0"
-serde_json = "1.0.113"
+serde_json = "1.0.116"
 serde_yaml = "0.9"
 sha256 = "1.5.0"
 signal-hook = "0.3.17"
 slice-copy = "0.3.0"
 smoltcp = "0.11.0"
-sysinfo = "0.30.10"
+sysinfo = "0.30.11"
 termtree = "0.4.1"
 thiserror = "1.0"
 tokio-tun = "0.11.4"
@@ -91,12 +92,12 @@ version = "0.13.1"
 features = ["derive"]

 [workspace.dependencies.reqwest]
-version = "0.12.3"
+version = "0.12.4"
 default-features = false
 features = ["rustls-tls"]

 [workspace.dependencies.serde]
-version = "1.0.196"
+version = "1.0.198"
 features = ["derive"]

 [workspace.dependencies.sys-mount]
DEV.md (4 changed lines)

@@ -69,8 +69,8 @@ $ ./hack/kernel/build.sh
 ```

 7. Copy the guest kernel image at `target/kernel/kernel-x86_64` to `/var/lib/krata/guest/kernel` to have it automatically detected by kratad.
-8. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
-9. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
+8. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
+9. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
 10. Run kratactl to launch a guest:

 ```sh
@@ -11,6 +11,7 @@ resolver = "2"
 [dependencies]
 anyhow = { workspace = true }
 async-stream = { workspace = true }
+base64 = { workspace = true }
 clap = { workspace = true }
 comfy-table = { workspace = true }
 crossterm = { workspace = true, features = ["event-stream"] }
@@ -19,11 +20,12 @@ env_logger = { workspace = true }
 fancy-duration = { workspace = true }
 human_bytes = { workspace = true }
 indicatif = { workspace = true }
-krata = { path = "../krata", version = "^0.0.9" }
+krata = { path = "../krata", version = "^0.0.10" }
 log = { workspace = true }
 prost-reflect = { workspace = true, features = ["serde"] }
 prost-types = { workspace = true }
 ratatui = { workspace = true }
+serde = { workspace = true }
 serde_json = { workspace = true }
 serde_yaml = { workspace = true }
 termtree = { workspace = true }
crates/ctl/src/cli/exec.rs (new file, 70 lines)

@@ -0,0 +1,70 @@
+use std::collections::HashMap;
+
+use anyhow::Result;
+use clap::Parser;
+use krata::v1::{
+    common::{GuestTaskSpec, GuestTaskSpecEnvVar},
+    control::{control_service_client::ControlServiceClient, ExecGuestRequest},
+};
+use tonic::{transport::Channel, Request};
+
+use crate::console::StdioConsoleStream;
+
+use super::resolve_guest;
+
+#[derive(Parser)]
+#[command(about = "Execute a command inside the guest")]
+pub struct ExecCommand {
+    #[arg[short, long, help = "Environment variables"]]
+    env: Option<Vec<String>>,
+    #[arg(short = 'w', long, help = "Working directory")]
+    working_directory: Option<String>,
+    #[arg(help = "Guest to exec inside, either the name or the uuid")]
+    guest: String,
+    #[arg(
+        allow_hyphen_values = true,
+        trailing_var_arg = true,
+        help = "Command to run inside the guest"
+    )]
+    command: Vec<String>,
+}
+
+impl ExecCommand {
+    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
+        let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
+        let initial = ExecGuestRequest {
+            guest_id,
+            task: Some(GuestTaskSpec {
+                environment: env_map(&self.env.unwrap_or_default())
+                    .iter()
+                    .map(|(key, value)| GuestTaskSpecEnvVar {
+                        key: key.clone(),
+                        value: value.clone(),
+                    })
+                    .collect(),
+                command: self.command,
+                working_directory: self.working_directory.unwrap_or_default(),
+            }),
+            data: vec![],
+        };
+
+        let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
+
+        let response = client.exec_guest(Request::new(stream)).await?.into_inner();
+
+        let code = StdioConsoleStream::exec_output(response).await?;
+        std::process::exit(code);
+    }
+}
+
+fn env_map(env: &[String]) -> HashMap<String, String> {
+    let mut map = HashMap::<String, String>::new();
+    for item in env {
+        if let Some((key, value)) = item.split_once('=') {
+            map.insert(key.to_string(), value.to_string());
+        }
+    }
+    map
+}
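The `env_map` helper above splits each `--env KEY=VALUE` argument on the first `=` and silently drops malformed entries. A self-contained sketch of that parsing behavior (standalone illustration, not code from the crate):

```rust
use std::collections::HashMap;

// Illustrative re-implementation of the KEY=VALUE parsing used by env_map.
fn parse_env(pairs: &[String]) -> HashMap<String, String> {
    let mut map = HashMap::new();
    for item in pairs {
        // Split on the first '=' only, so values may themselves contain '='.
        if let Some((key, value)) = item.split_once('=') {
            map.insert(key.to_string(), value.to_string());
        }
    }
    map
}

fn main() {
    let args = vec!["RUST_LOG=debug".to_string(), "PATH=/bin:/usr/bin".to_string()];
    let env = parse_env(&args);
    assert_eq!(env.get("RUST_LOG").map(String::as_str), Some("debug"));
    assert_eq!(env.get("PATH").map(String::as_str), Some("/bin:/usr/bin"));
    println!("{:?}", env);
}
```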
crates/ctl/src/cli/identify_host.rs (new file, 22 lines)

@@ -0,0 +1,22 @@
+use anyhow::Result;
+use clap::Parser;
+use krata::v1::control::{control_service_client::ControlServiceClient, IdentifyHostRequest};
+
+use tonic::{transport::Channel, Request};
+
+#[derive(Parser)]
+#[command(about = "Identify information about the host")]
+pub struct IdentifyHostCommand {}
+
+impl IdentifyHostCommand {
+    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
+        let response = client
+            .identify_host(Request::new(IdentifyHostRequest {}))
+            .await?
+            .into_inner();
+        println!("Host UUID: {}", response.host_uuid);
+        println!("Host Domain: {}", response.host_domid);
+        println!("Krata Version: {}", response.krata_version);
+        Ok(())
+    }
+}
@@ -1,14 +1,18 @@
 use anyhow::Result;
+use base64::Engine;
 use clap::{Parser, ValueEnum};
 use krata::{
     events::EventStream,
+    idm::{internal, serialize::IdmSerializable, transport::IdmTransportPacketForm},
     v1::control::{control_service_client::ControlServiceClient, SnoopIdmReply, SnoopIdmRequest},
 };
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
 use tokio_stream::StreamExt;
 use tonic::transport::Channel;

-use crate::format::{kv2line, proto2dynamic, proto2kv};
+use crate::format::{kv2line, proto2dynamic, value2kv};

 #[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
 enum IdmSnoopFormat {
@@ -34,19 +38,22 @@ impl IdmSnoopCommand {
         while let Some(reply) = stream.next().await {
             let reply = reply?;
+            let Some(line) = convert_idm_snoop(reply) else {
+                continue;
+            };
             match self.format {
                 IdmSnoopFormat::Simple => {
-                    self.print_simple(reply)?;
+                    self.print_simple(line)?;
                 }
                 IdmSnoopFormat::Jsonl => {
-                    let value = serde_json::to_value(proto2dynamic(reply)?)?;
-                    let encoded = serde_json::to_string(&value)?;
+                    let encoded = serde_json::to_string(&line)?;
                     println!("{}", encoded.trim());
                 }
                 IdmSnoopFormat::KeyValue => {
-                    self.print_key_value(reply)?;
+                    self.print_key_value(line)?;
                 }
             }
         }
@@ -54,21 +61,97 @@ impl IdmSnoopCommand {
         Ok(())
     }

-    fn print_simple(&self, reply: SnoopIdmReply) -> Result<()> {
-        let from = reply.from;
-        let to = reply.to;
-        let Some(packet) = reply.packet else {
-            return Ok(());
-        };
-        let value = serde_json::to_value(proto2dynamic(packet)?)?;
-        let encoded = serde_json::to_string(&value)?;
-        println!("({} -> {}) {}", from, to, encoded);
+    fn print_simple(&self, line: IdmSnoopLine) -> Result<()> {
+        let encoded = if !line.packet.decoded.is_null() {
+            serde_json::to_string(&line.packet.decoded)?
+        } else {
+            base64::prelude::BASE64_STANDARD.encode(&line.packet.data)
+        };
+        println!(
+            "({} -> {}) {} {} {}",
+            line.from, line.to, line.packet.id, line.packet.form, encoded
+        );
         Ok(())
     }

-    fn print_key_value(&self, reply: SnoopIdmReply) -> Result<()> {
-        let kvs = proto2kv(reply)?;
+    fn print_key_value(&self, line: IdmSnoopLine) -> Result<()> {
+        let kvs = value2kv(serde_json::to_value(line)?)?;
         println!("{}", kv2line(kvs));
         Ok(())
     }
 }
+
+#[derive(Serialize, Deserialize)]
+pub struct IdmSnoopLine {
+    pub from: String,
+    pub to: String,
+    pub packet: IdmSnoopData,
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct IdmSnoopData {
+    pub id: u64,
+    pub channel: u64,
+    pub form: String,
+    pub data: String,
+    pub decoded: Value,
+}
+
+pub fn convert_idm_snoop(reply: SnoopIdmReply) -> Option<IdmSnoopLine> {
+    let packet = &(reply.packet?);
+
+    let decoded = if packet.channel == 0 {
+        match packet.form() {
+            IdmTransportPacketForm::Event => internal::Event::decode(&packet.data)
+                .ok()
+                .and_then(|event| proto2dynamic(event).ok()),
+
+            IdmTransportPacketForm::Request
+            | IdmTransportPacketForm::StreamRequest
+            | IdmTransportPacketForm::StreamRequestUpdate => {
+                internal::Request::decode(&packet.data)
+                    .ok()
+                    .and_then(|event| proto2dynamic(event).ok())
+            }
+
+            IdmTransportPacketForm::Response | IdmTransportPacketForm::StreamResponseUpdate => {
+                internal::Response::decode(&packet.data)
+                    .ok()
+                    .and_then(|event| proto2dynamic(event).ok())
+            }
+
+            _ => None,
+        }
+    } else {
+        None
+    };
+
+    let decoded = decoded
+        .and_then(|message| serde_json::to_value(message).ok())
+        .unwrap_or(Value::Null);
+
+    let data = IdmSnoopData {
+        id: packet.id,
+        channel: packet.channel,
+        form: match packet.form() {
+            IdmTransportPacketForm::Raw => "raw".to_string(),
+            IdmTransportPacketForm::Event => "event".to_string(),
+            IdmTransportPacketForm::Request => "request".to_string(),
+            IdmTransportPacketForm::Response => "response".to_string(),
+            IdmTransportPacketForm::StreamRequest => "stream-request".to_string(),
+            IdmTransportPacketForm::StreamRequestUpdate => "stream-request-update".to_string(),
+            IdmTransportPacketForm::StreamRequestClosed => "stream-request-closed".to_string(),
+            IdmTransportPacketForm::StreamResponseUpdate => "stream-response-update".to_string(),
+            IdmTransportPacketForm::StreamResponseClosed => "stream-response-closed".to_string(),
+            _ => format!("unknown-{}", packet.form),
+        },
+        data: base64::prelude::BASE64_STANDARD.encode(&packet.data),
+        decoded,
+    };
+
+    Some(IdmSnoopLine {
+        from: reply.from,
+        to: reply.to,
+        packet: data,
+    })
+}
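When a snooped packet cannot be decoded into a known internal message (or arrives on a non-zero channel), the new code falls back to printing the raw payload base64-encoded so nothing is lost on the console. A minimal sketch of that fallback, assuming the `base64` crate as a dependency (names here are illustrative, not the crate's code):

```rust
use base64::{engine::general_purpose::STANDARD, Engine};

// Show an opaque packet payload as base64 text, and prove it round-trips.
fn main() {
    let payload: &[u8] = &[0x08, 0x96, 0x01];
    let shown = STANDARD.encode(payload);
    println!("raw packet payload: {}", shown);
    assert_eq!(STANDARD.decode(&shown).unwrap(), payload);
}
```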
@@ -1,13 +1,13 @@
 use std::collections::HashMap;

 use anyhow::Result;
-use clap::Parser;
+use clap::{Parser, ValueEnum};
 use krata::{
     events::EventStream,
     v1::{
         common::{
-            guest_image_spec::Image, GuestImageSpec, GuestOciImageFormat, GuestOciImageSpec,
-            GuestSpec, GuestStatus, GuestTaskSpec, GuestTaskSpecEnvVar,
+            guest_image_spec::Image, GuestImageSpec, GuestOciImageSpec, GuestSpec, GuestStatus,
+            GuestTaskSpec, GuestTaskSpecEnvVar, OciImageFormat,
         },
         control::{
             control_service_client::ControlServiceClient, watch_events_reply::Event,
@@ -21,13 +21,19 @@ use tonic::{transport::Channel, Request};
 use crate::{console::StdioConsoleStream, pull::pull_interactive_progress};

-use super::pull::PullImageFormat;
+#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
+pub enum LaunchImageFormat {
+    Squashfs,
+    Erofs,
+}

 #[derive(Parser)]
 #[command(about = "Launch a new guest")]
-pub struct LauchCommand {
-    #[arg(short = 'S', long, default_value = "squashfs", help = "Image format")]
-    image_format: PullImageFormat,
+pub struct LaunchCommand {
+    #[arg(long, default_value = "squashfs", help = "Image format")]
+    image_format: LaunchImageFormat,
+    #[arg(long, help = "Overwrite image cache on pull")]
+    pull_overwrite_cache: bool,
     #[arg(short, long, help = "Name of the guest")]
     name: Option<String>,
     #[arg(
@@ -58,6 +64,12 @@ pub struct LauchCommand {
         help = "Wait for the guest to start, implied by --attach"
     )]
     wait: bool,
+    #[arg(short = 'k', long, help = "OCI kernel image for guest to use")]
+    kernel: Option<String>,
+    #[arg(short = 'I', long, help = "OCI initrd image for guest to use")]
+    initrd: Option<String>,
+    #[arg(short = 'w', long, help = "Working directory")]
+    working_directory: Option<String>,
     #[arg(help = "Container image for guest to use")]
     oci: String,
     #[arg(
@@ -68,32 +80,47 @@ pub struct LauchCommand {
     command: Vec<String>,
 }

-impl LauchCommand {
+impl LaunchCommand {
     pub async fn run(
         self,
         mut client: ControlServiceClient<Channel>,
         events: EventStream,
     ) -> Result<()> {
-        let response = client
-            .pull_image(PullImageRequest {
-                image: self.oci.clone(),
-                format: match self.image_format {
-                    PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
-                    PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
+        let image = self
+            .pull_image(
+                &mut client,
+                &self.oci,
+                match self.image_format {
+                    LaunchImageFormat::Squashfs => OciImageFormat::Squashfs,
+                    LaunchImageFormat::Erofs => OciImageFormat::Erofs,
                 },
-            })
+            )
             .await?;
-        let reply = pull_interactive_progress(response.into_inner()).await?;
+
+        let kernel = if let Some(ref kernel) = self.kernel {
+            let kernel_image = self
+                .pull_image(&mut client, kernel, OciImageFormat::Tar)
+                .await?;
+            Some(kernel_image)
+        } else {
+            None
+        };
+
+        let initrd = if let Some(ref initrd) = self.initrd {
+            let kernel_image = self
+                .pull_image(&mut client, initrd, OciImageFormat::Tar)
+                .await?;
+            Some(kernel_image)
+        } else {
+            None
+        };
+
         let request = CreateGuestRequest {
             spec: Some(GuestSpec {
                 name: self.name.unwrap_or_default(),
-                image: Some(GuestImageSpec {
-                    image: Some(Image::Oci(GuestOciImageSpec {
-                        digest: reply.digest,
-                        format: reply.format,
-                    })),
-                }),
+                image: Some(image),
+                kernel,
+                initrd,
                 vcpus: self.cpus,
                 mem: self.mem,
                 task: Some(GuestTaskSpec {
@@ -105,6 +132,7 @@ impl LauchCommand {
                     })
                     .collect(),
                 command: self.command,
+                working_directory: self.working_directory.unwrap_or_default(),
             }),
             annotations: vec![],
         }),
@@ -139,6 +167,28 @@ impl LauchCommand {
         StdioConsoleStream::restore_terminal_mode();
         std::process::exit(code.unwrap_or(0));
     }
+
+    async fn pull_image(
+        &self,
+        client: &mut ControlServiceClient<Channel>,
+        image: &str,
+        format: OciImageFormat,
+    ) -> Result<GuestImageSpec> {
+        let response = client
+            .pull_image(PullImageRequest {
+                image: image.to_string(),
+                format: format.into(),
+                overwrite_cache: self.pull_overwrite_cache,
+            })
+            .await?;
+        let reply = pull_interactive_progress(response.into_inner()).await?;
+        Ok(GuestImageSpec {
+            image: Some(Image::Oci(GuestOciImageSpec {
+                digest: reply.digest,
+                format: reply.format,
+            })),
+        })
+    }
 }

 async fn wait_guest_started(id: &str, events: EventStream) -> Result<()> {
@@ -1,5 +1,7 @@
 pub mod attach;
 pub mod destroy;
+pub mod exec;
+pub mod identify_host;
 pub mod idm_snoop;
 pub mod launch;
 pub mod list;
@@ -20,9 +22,10 @@ use krata::{
 use tonic::{transport::Channel, Request};

 use self::{
-    attach::AttachCommand, destroy::DestroyCommand, idm_snoop::IdmSnoopCommand,
-    launch::LauchCommand, list::ListCommand, logs::LogsCommand, metrics::MetricsCommand,
-    pull::PullCommand, resolve::ResolveCommand, top::TopCommand, watch::WatchCommand,
+    attach::AttachCommand, destroy::DestroyCommand, exec::ExecCommand,
+    identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand, launch::LaunchCommand,
+    list::ListCommand, logs::LogsCommand, metrics::MetricsCommand, pull::PullCommand,
+    resolve::ResolveCommand, top::TopCommand, watch::WatchCommand,
 };

 #[derive(Parser)]
@@ -45,7 +48,7 @@ pub struct ControlCommand {
 #[derive(Subcommand)]
 pub enum Commands {
-    Launch(LauchCommand),
+    Launch(LaunchCommand),
     Destroy(DestroyCommand),
     List(ListCommand),
     Attach(AttachCommand),
@@ -56,6 +59,8 @@ pub enum Commands {
     Metrics(MetricsCommand),
     IdmSnoop(IdmSnoopCommand),
     Top(TopCommand),
+    IdentifyHost(IdentifyHostCommand),
+    Exec(ExecCommand),
 }

 impl ControlCommand {
@@ -107,6 +112,14 @@ impl ControlCommand {
             Commands::Pull(pull) => {
                 pull.run(client).await?;
             }
+
+            Commands::IdentifyHost(identify) => {
+                identify.run(client).await?;
+            }
+
+            Commands::Exec(exec) => {
+                exec.run(client).await?;
+            }
         }
         Ok(())
     }
@@ -1,7 +1,7 @@
 use anyhow::Result;
 use clap::{Parser, ValueEnum};
 use krata::v1::{
-    common::GuestOciImageFormat,
+    common::OciImageFormat,
     control::{control_service_client::ControlServiceClient, PullImageRequest},
 };

@@ -13,6 +13,7 @@ use crate::pull::pull_interactive_progress;
 pub enum PullImageFormat {
     Squashfs,
     Erofs,
+    Tar,
 }

 #[derive(Parser)]
@@ -22,6 +23,8 @@ pub struct PullCommand {
     image: String,
     #[arg(short = 's', long, default_value = "squashfs", help = "Image format")]
     image_format: PullImageFormat,
+    #[arg(short = 'o', long, help = "Overwrite image cache")]
+    overwrite_cache: bool,
 }

 impl PullCommand {
@@ -30,9 +33,11 @@ impl PullCommand {
             .pull_image(PullImageRequest {
                 image: self.image.clone(),
                 format: match self.image_format {
-                    PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
-                    PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
+                    PullImageFormat::Squashfs => OciImageFormat::Squashfs.into(),
+                    PullImageFormat::Erofs => OciImageFormat::Erofs.into(),
+                    PullImageFormat::Tar => OciImageFormat::Tar.into(),
                 },
+                overwrite_cache: self.overwrite_cache,
             })
             .await?;
         let reply = pull_interactive_progress(response.into_inner()).await?;
@@ -1,4 +1,4 @@
-use anyhow::Result;
+use anyhow::{anyhow, Result};
 use async_stream::stream;
 use crossterm::{
     terminal::{disable_raw_mode, enable_raw_mode, is_raw_mode_enabled},
@@ -8,12 +8,15 @@ use krata::{
     events::EventStream,
     v1::{
         common::GuestStatus,
-        control::{watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest},
+        control::{
+            watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest, ExecGuestReply,
+            ExecGuestRequest,
+        },
     },
 };
 use log::debug;
 use tokio::{
-    io::{stdin, stdout, AsyncReadExt, AsyncWriteExt},
+    io::{stderr, stdin, stdout, AsyncReadExt, AsyncWriteExt},
     task::JoinHandle,
 };
 use tokio_stream::{Stream, StreamExt};
@@ -45,6 +48,31 @@ impl StdioConsoleStream {
         }
     }

+    pub async fn stdin_stream_exec(
+        initial: ExecGuestRequest,
+    ) -> impl Stream<Item = ExecGuestRequest> {
+        let mut stdin = stdin();
+        stream! {
+            yield initial;
+
+            let mut buffer = vec![0u8; 60];
+            loop {
+                let size = match stdin.read(&mut buffer).await {
+                    Ok(size) => size,
+                    Err(error) => {
+                        debug!("failed to read stdin: {}", error);
+                        break;
+                    }
+                };
+                let data = buffer[0..size].to_vec();
+                if size == 1 && buffer[0] == 0x1d {
+                    break;
+                }
+                yield ExecGuestRequest { guest_id: String::default(), task: None, data };
+            }
+        }
+    }
+
     pub async fn stdout(mut stream: Streaming<ConsoleDataReply>) -> Result<()> {
         if stdin().is_tty() {
             enable_raw_mode()?;
@@ -62,6 +90,32 @@ impl StdioConsoleStream {
         Ok(())
     }

+    pub async fn exec_output(mut stream: Streaming<ExecGuestReply>) -> Result<i32> {
+        let mut stdout = stdout();
+        let mut stderr = stderr();
+        while let Some(reply) = stream.next().await {
+            let reply = reply?;
+            if !reply.stdout.is_empty() {
+                stdout.write_all(&reply.stdout).await?;
+                stdout.flush().await?;
+            }
+
+            if !reply.stderr.is_empty() {
+                stderr.write_all(&reply.stderr).await?;
+                stderr.flush().await?;
+            }
+
+            if reply.exited {
+                if reply.error.is_empty() {
+                    return Ok(reply.exit_code);
+                } else {
+                    return Err(anyhow!("exec failed: {}", reply.error));
+                }
+            }
+        }
+        Ok(-1)
+    }
+
     pub async fn guest_exit_hook(
         id: String,
         events: EventStream,
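Note that `stdin_stream_exec` stops forwarding input when it reads a lone `0x1d` byte (Ctrl-]), the same escape byte used for console detach. A tiny sketch of that check (the helper name is illustrative, not part of the crate):

```rust
// A single 0x1d byte (Ctrl-]) on stdin ends the exec input stream instead of
// being forwarded to the guest; anything else is passed through untouched.
fn is_detach(chunk: &[u8]) -> bool {
    chunk.len() == 1 && chunk[0] == 0x1d
}

fn main() {
    assert!(is_detach(&[0x1d]));
    assert!(!is_detach(b"ls -la\n"));
    assert!(!is_detach(&[0x1d, b'x'])); // only a lone 0x1d detaches
    println!("detach checks passed");
}
```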
@@ -4,7 +4,7 @@ use anyhow::Result;
 use fancy_duration::FancyDuration;
 use human_bytes::human_bytes;
 use krata::v1::common::{Guest, GuestMetricFormat, GuestMetricNode, GuestStatus};
-use prost_reflect::{DynamicMessage, FieldDescriptor, ReflectMessage, Value as ReflectValue};
+use prost_reflect::{DynamicMessage, ReflectMessage};
 use prost_types::Value;
 use termtree::Tree;

@@ -15,64 +15,59 @@ pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
     )?)
 }

-pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
-    let message = proto2dynamic(proto)?;
+pub fn value2kv(value: serde_json::Value) -> Result<HashMap<String, String>> {
     let mut map = HashMap::new();
-
-    fn crawl(
-        prefix: String,
-        field: Option<&FieldDescriptor>,
-        map: &mut HashMap<String, String>,
-        value: &ReflectValue,
-    ) {
+    fn crawl(prefix: String, map: &mut HashMap<String, String>, value: serde_json::Value) {
+        fn dot(prefix: &str, next: String) -> String {
+            if prefix.is_empty() {
+                next.to_string()
+            } else {
+                format!("{}.{}", prefix, next)
+            }
+        }
+
         match value {
-            ReflectValue::Message(child) => {
-                for (field, field_value) in child.fields() {
-                    let path = if prefix.is_empty() {
-                        field.json_name().to_string()
-                    } else {
-                        format!("{}.{}", prefix, field.json_name())
-                    };
-                    crawl(path, Some(&field), map, field_value);
-                }
-            }
-
-            ReflectValue::EnumNumber(number) => {
-                if let Some(kind) = field.map(|x| x.kind()) {
-                    if let Some(e) = kind.as_enum() {
-                        if let Some(value) = e.get_value(*number) {
-                            map.insert(prefix, value.name().to_string());
-                        }
-                    }
-                }
-            }
-
-            ReflectValue::String(value) => {
-                map.insert(prefix.to_string(), value.clone());
-            }
-
-            ReflectValue::List(value) => {
-                for (x, value) in value.iter().enumerate() {
-                    crawl(format!("{}.{}", prefix, x), field, map, value);
-                }
-            }
-
-            _ => {
-                map.insert(prefix.to_string(), value.to_string());
-            }
+            serde_json::Value::Null => {
+                map.insert(prefix, "null".to_string());
+            }
+
+            serde_json::Value::String(value) => {
+                map.insert(prefix, value);
+            }
+
+            serde_json::Value::Bool(value) => {
+                map.insert(prefix, value.to_string());
+            }
+
+            serde_json::Value::Number(value) => {
+                map.insert(prefix, value.to_string());
+            }
+
+            serde_json::Value::Array(value) => {
+                for (i, item) in value.into_iter().enumerate() {
+                    let next = dot(&prefix, i.to_string());
+                    crawl(next, map, item);
+                }
+            }
+
+            serde_json::Value::Object(value) => {
+                for (key, item) in value {
+                    let next = dot(&prefix, key);
+                    crawl(next, map, item);
+                }
+            }
         }
     }
-    crawl(
-        "".to_string(),
-        None,
-        &mut map,
-        &ReflectValue::Message(message),
-    );
-
+    crawl("".to_string(), &mut map, value);
     Ok(map)
 }

+pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
+    let message = proto2dynamic(proto)?;
+    let value = serde_json::to_value(message)?;
+    value2kv(value)
+}
+
 pub fn kv2line(map: HashMap<String, String>) -> String {
     map.iter()
         .map(|(k, v)| format!("{}=\"{}\"", k, v.replace('"', "\\\"")))
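The new `value2kv` walks an arbitrary `serde_json::Value` and emits dotted key paths, which `kv2line` then renders as `key="value"` pairs. A self-contained sketch of the same flattening idea, assuming `serde_json` as a dependency (this is an illustration, not the crate's own code):

```rust
use std::collections::HashMap;

use serde_json::{json, Value};

// Flatten nested objects and arrays into dotted keys such as "guest.vcpus"
// or "guest.tags.1", in the style of value2kv.
fn flatten(prefix: String, value: Value, out: &mut HashMap<String, String>) {
    let join = |prefix: &str, next: String| {
        if prefix.is_empty() { next } else { format!("{}.{}", prefix, next) }
    };
    match value {
        Value::Null => { out.insert(prefix, "null".to_string()); }
        Value::Bool(b) => { out.insert(prefix, b.to_string()); }
        Value::Number(n) => { out.insert(prefix, n.to_string()); }
        Value::String(s) => { out.insert(prefix, s); }
        Value::Array(items) => {
            for (i, item) in items.into_iter().enumerate() {
                flatten(join(&prefix, i.to_string()), item, out);
            }
        }
        Value::Object(fields) => {
            for (key, item) in fields {
                flatten(join(&prefix, key), item, out);
            }
        }
    }
}

fn main() {
    let value = json!({ "guest": { "name": "demo", "vcpus": 2, "tags": ["a", "b"] } });
    let mut map = HashMap::new();
    flatten(String::new(), value, &mut map);
    assert_eq!(map.get("guest.vcpus").map(String::as_str), Some("2"));
    assert_eq!(map.get("guest.tags.1").map(String::as_str), Some("b"));
    println!("{:?}", map);
}
```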
@ -1,20 +1,205 @@
|
|||||||
use std::collections::HashMap;
|
use std::{
|
||||||
|
collections::{hash_map::Entry, HashMap},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||||
use krata::v1::control::{PullImageProgressLayerPhase, PullImageProgressPhase, PullImageReply};
|
use krata::v1::control::{
|
||||||
|
image_progress_indication::Indication, ImageProgressIndication, ImageProgressLayerPhase,
|
||||||
|
ImageProgressPhase, PullImageReply,
|
||||||
|
};
|
||||||
use tokio_stream::StreamExt;
|
use tokio_stream::StreamExt;
|
||||||
use tonic::Streaming;
|
use tonic::Streaming;
|
||||||
|
|
||||||
|
const SPINNER_STRINGS: &[&str] = &[
|
||||||
|
"[= ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ = ]",
|
||||||
|
"[ =]",
|
||||||
|
"[====================]",
|
||||||
|
];
|
||||||
|
|
||||||
|
fn progress_bar_for_indication(indication: &ImageProgressIndication) -> Option<ProgressBar> {
|
||||||
|
match indication.indication.as_ref() {
|
||||||
|
Some(Indication::Hidden(_)) | None => None,
|
||||||
|
Some(Indication::Bar(indic)) => {
|
||||||
|
let bar = ProgressBar::new(indic.total);
|
||||||
|
bar.enable_steady_tick(Duration::from_millis(100));
|
||||||
|
Some(bar)
|
||||||
|
}
|
||||||
|
Some(Indication::Spinner(_)) => {
|
||||||
|
let bar = ProgressBar::new_spinner();
|
||||||
|
bar.enable_steady_tick(Duration::from_millis(100));
|
||||||
|
Some(bar)
|
||||||
|
}
|
||||||
|
Some(Indication::Completed(indic)) => {
|
||||||
|
let bar = ProgressBar::new_spinner();
|
||||||
|
bar.enable_steady_tick(Duration::from_millis(100));
|
||||||
|
if !indic.message.is_empty() {
|
||||||
|
bar.finish_with_message(indic.message.clone());
|
||||||
|
} else {
|
||||||
|
bar.finish()
|
||||||
|
}
|
||||||
|
Some(bar)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn configure_for_indication(
|
||||||
|
bar: &mut ProgressBar,
|
||||||
|
multi_progress: &mut MultiProgress,
|
||||||
|
indication: &ImageProgressIndication,
|
||||||
|
top_phase: Option<ImageProgressPhase>,
|
||||||
|
layer_phase: Option<ImageProgressLayerPhase>,
|
||||||
|
layer_id: Option<&str>,
|
||||||
|
) {
|
||||||
|
let prefix = if let Some(phase) = top_phase {
|
||||||
|
match phase {
|
||||||
|
ImageProgressPhase::Unknown => "unknown",
|
||||||
|
ImageProgressPhase::Started => "started",
|
||||||
|
ImageProgressPhase::Resolving => "resolving",
|
||||||
|
ImageProgressPhase::Resolved => "resolved",
|
||||||
|
ImageProgressPhase::ConfigDownload => "downloading",
|
||||||
|
ImageProgressPhase::LayerDownload => "downloading",
|
||||||
|
ImageProgressPhase::Assemble => "assembling",
|
||||||
|
ImageProgressPhase::Pack => "packing",
|
||||||
|
ImageProgressPhase::Complete => "complete",
|
||||||
|
}
|
||||||
|
} else if let Some(phase) = layer_phase {
|
||||||
|
match phase {
|
||||||
|
ImageProgressLayerPhase::Unknown => "unknown",
|
||||||
|
ImageProgressLayerPhase::Waiting => "waiting",
|
||||||
|
ImageProgressLayerPhase::Downloading => "downloading",
|
||||||
|
ImageProgressLayerPhase::Downloaded => "downloaded",
|
||||||
|
ImageProgressLayerPhase::Extracting => "extracting",
|
||||||
|
ImageProgressLayerPhase::Extracted => "extracted",
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
""
|
||||||
|
};
|
||||||
|
let prefix = prefix.to_string();
|
||||||
|
|
||||||
|
let id = if let Some(layer_id) = layer_id {
|
||||||
|
let hash = if let Some((_, hash)) = layer_id.split_once(':') {
|
||||||
|
hash
|
||||||
|
} else {
|
||||||
|
"unknown"
|
||||||
|
};
|
||||||
|
let small_hash = if hash.len() > 10 { &hash[0..10] } else { hash };
|
||||||
|
Some(format!("{:width$}", small_hash, width = 10))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let prefix = if let Some(id) = id {
|
||||||
|
format!("{} {:width$}", id, prefix, width = 11)
|
||||||
|
} else {
|
||||||
|
format!(" {:width$}", prefix, width = 11)
|
||||||
|
};
|
||||||
|
|
||||||
|
match indication.indication.as_ref() {
|
||||||
|
Some(Indication::Hidden(_)) | None => {
|
||||||
|
multi_progress.remove(bar);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
Some(Indication::Bar(indic)) => {
|
||||||
|
if indic.is_bytes {
|
||||||
|
bar.set_style(ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) eta: {eta}").unwrap().progress_chars("=>-"));
|
||||||
|
} else {
|
||||||
|
bar.set_style(
|
||||||
|
ProgressStyle::with_template(
|
||||||
|
"{prefix} [{bar:20} {msg} {human_pos}/{human_len} ({per_sec}/sec)",
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
.progress_chars("=>-"),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
bar.set_message(indic.message.clone());
|
||||||
|
bar.set_position(indic.current);
|
||||||
|
bar.set_length(indic.total);
|
||||||
|
}
|
||||||
|
Some(Indication::Spinner(indic)) => {
|
||||||
|
bar.set_style(
|
||||||
|
ProgressStyle::with_template("{prefix} {spinner} {msg}")
|
||||||
|
.unwrap()
|
||||||
|
.tick_strings(SPINNER_STRINGS),
|
||||||
|
);
|
||||||
|
bar.set_message(indic.message.clone());
|
||||||
|
}
|
||||||
|
Some(Indication::Completed(indic)) => {
|
||||||
|
if bar.is_finished() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
bar.disable_steady_tick();
|
||||||
|
bar.set_message(indic.message.clone());
|
||||||
|
if indic.total != 0 {
|
||||||
|
bar.set_position(indic.total);
|
||||||
|
bar.set_length(indic.total);
|
||||||
|
}
|
||||||
|
if bar.style().get_tick_str(0).contains('=') {
|
||||||
|
bar.set_style(
|
||||||
|
ProgressStyle::with_template("{prefix} {spinner} {msg}")
|
||||||
|
.unwrap()
|
||||||
|
.tick_strings(SPINNER_STRINGS),
|
||||||
|
);
|
||||||
|
bar.finish_with_message(indic.message.clone());
|
||||||
|
} else if indic.is_bytes {
|
||||||
|
bar.set_style(
|
||||||
|
ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_total_bytes}")
|
||||||
|
.unwrap()
|
||||||
|
.progress_chars("=>-"),
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
bar.set_style(
|
||||||
|
ProgressStyle::with_template("{prefix} [{bar:20}] {msg}")
|
||||||
|
.unwrap()
|
||||||
|
.progress_chars("=>-"),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
bar.tick();
|
||||||
|
bar.enable_steady_tick(Duration::from_millis(100));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
bar.set_prefix(prefix);
|
||||||
|
bar.tick();
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn pull_interactive_progress(
|
pub async fn pull_interactive_progress(
|
||||||
mut stream: Streaming<PullImageReply>,
|
mut stream: Streaming<PullImageReply>,
|
||||||
) -> Result<PullImageReply> {
|
) -> Result<PullImageReply> {
|
||||||
let mut multi_progress: Option<(MultiProgress, HashMap<String, ProgressBar>)> = None;
|
let mut multi_progress = MultiProgress::new();
|
||||||
|
multi_progress.set_move_cursor(false);
|
||||||
|
let mut progresses = HashMap::new();
|
||||||
|
|
||||||
while let Some(reply) = stream.next().await {
|
while let Some(reply) = stream.next().await {
|
||||||
let reply = reply?;
|
let reply = match reply {
|
||||||
|
Ok(reply) => reply,
|
||||||
|
Err(error) => {
|
||||||
|
multi_progress.clear()?;
|
||||||
|
return Err(error.into());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
if reply.progress.is_none() && !reply.digest.is_empty() {
|
if reply.progress.is_none() && !reply.digest.is_empty() {
|
||||||
|
multi_progress.clear()?;
|
||||||
return Ok(reply);
|
return Ok(reply);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -22,97 +207,62 @@ pub async fn pull_interactive_progress(
             continue;
         };

-        if multi_progress.is_none() {
-            multi_progress = Some((MultiProgress::new(), HashMap::new()));
-        }
-
-        let Some((multi_progress, progresses)) = multi_progress.as_mut() else {
-            continue;
-        };
-
-        match oci.phase() {
-            PullImageProgressPhase::Resolved
-            | PullImageProgressPhase::ConfigAcquire
-            | PullImageProgressPhase::LayerAcquire => {
-                if progresses.is_empty() && !oci.layers.is_empty() {
-                    for layer in &oci.layers {
-                        let bar = ProgressBar::new(layer.total);
-                        bar.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
-                        progresses.insert(layer.id.clone(), bar.clone());
-                        multi_progress.add(bar);
-                    }
-                }
-
-                for layer in oci.layers {
-                    let Some(progress) = progresses.get_mut(&layer.id) else {
-                        continue;
-                    };
-
-                    let phase = match layer.phase() {
-                        PullImageProgressLayerPhase::Waiting => "waiting",
-                        PullImageProgressLayerPhase::Downloading => "downloading",
-                        PullImageProgressLayerPhase::Downloaded => "downloaded",
-                        PullImageProgressLayerPhase::Extracting => "extracting",
-                        PullImageProgressLayerPhase::Extracted => "extracted",
-                        _ => "unknown",
-                    };
-
-                    let simple = if let Some((_, hash)) = layer.id.split_once(':') {
-                        hash
-                    } else {
-                        "unknown"
-                    };
-                    let simple = if simple.len() > 10 {
-                        &simple[0..10]
-                    } else {
-                        simple
-                    };
-                    let message = format!(
-                        "{:width$} {:phwidth$}",
-                        simple,
-                        phase,
-                        width = 10,
-                        phwidth = 11
-                    );
-
-                    if message != progress.message() {
-                        progress.set_message(message);
-                    }
-
-                    progress.update(|state| {
-                        state.set_len(layer.total);
-                        state.set_pos(layer.value);
-                    });
-                }
-            }
-
-            PullImageProgressPhase::Packing => {
-                for (key, bar) in &mut *progresses {
-                    if key == "packing" {
-                        continue;
-                    }
-                    bar.finish_and_clear();
-                    multi_progress.remove(bar);
-                }
-                progresses.retain(|k, _| k == "packing");
-                if progresses.is_empty() {
-                    let progress = ProgressBar::new(100);
-                    progress.set_message("packing ");
-                    progress.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
-                    progresses.insert("packing".to_string(), progress);
-                }
-                let Some(progress) = progresses.get("packing") else {
-                    continue;
-                };
-
-                progress.update(|state| {
-                    state.set_len(oci.total);
-                    state.set_pos(oci.value);
-                });
-            }
-
-            _ => {}
-        }
+        for layer in &oci.layers {
+            let Some(ref indication) = layer.indication else {
+                continue;
+            };
+
+            let bar = match progresses.entry(layer.id.clone()) {
+                Entry::Occupied(entry) => Some(entry.into_mut()),
+                Entry::Vacant(entry) => {
+                    if let Some(bar) = progress_bar_for_indication(indication) {
+                        multi_progress.add(bar.clone());
+                        Some(entry.insert(bar))
+                    } else {
+                        None
+                    }
+                }
+            };
+
+            if let Some(bar) = bar {
+                configure_for_indication(
+                    bar,
+                    &mut multi_progress,
+                    indication,
+                    None,
+                    Some(layer.phase()),
+                    Some(&layer.id),
+                );
+            }
+        }
+
+        if let Some(ref indication) = oci.indication {
+            let bar = match progresses.entry("root".to_string()) {
+                Entry::Occupied(entry) => Some(entry.into_mut()),
+                Entry::Vacant(entry) => {
+                    if let Some(bar) = progress_bar_for_indication(indication) {
+                        multi_progress.add(bar.clone());
+                        Some(entry.insert(bar))
+                    } else {
+                        None
+                    }
+                }
+            };
+
+            if let Some(bar) = bar {
+                configure_for_indication(
+                    bar,
+                    &mut multi_progress,
+                    indication,
+                    Some(oci.phase()),
+                    None,
+                    None,
+                );
+            }
+        }
     }
+    multi_progress.clear()?;
     Err(anyhow!("never received final reply for image pull"))
 }
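For context, pull_interactive_progress consumes the PullImage reply stream from the control service and draws indicatif bars until the terminal reply (no progress, digest set) arrives. A minimal sketch of a caller is shown below; the ControlServiceClient construction and the `image` field on PullImageRequest are assumptions for illustration and are not part of this diff.

    use anyhow::Result;
    use krata::v1::control::{control_service_client::ControlServiceClient, PullImageRequest};
    use tonic::transport::Channel;

    // Hypothetical caller: pull an image, rendering progress while we wait.
    async fn pull(mut client: ControlServiceClient<Channel>, image: &str) -> Result<()> {
        let stream = client
            .pull_image(PullImageRequest {
                image: image.to_string(), // assumed request field
                ..Default::default()
            })
            .await?
            .into_inner();
        // Returns the final reply carrying the packed image digest, or an error.
        let reply = pull_interactive_progress(stream).await?;
        println!("pulled {} -> {}", image, reply.digest);
        Ok(())
    }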
@@ -17,15 +17,17 @@ circular-buffer = { workspace = true }
 clap = { workspace = true }
 env_logger = { workspace = true }
 futures = { workspace = true }
-krata = { path = "../krata", version = "^0.0.9" }
-krata-oci = { path = "../oci", version = "^0.0.9" }
-krata-runtime = { path = "../runtime", version = "^0.0.9" }
+krata = { path = "../krata", version = "^0.0.10" }
+krata-oci = { path = "../oci", version = "^0.0.10" }
+krata-runtime = { path = "../runtime", version = "^0.0.10" }
 log = { workspace = true }
 prost = { workspace = true }
 redb = { workspace = true }
+scopeguard = { workspace = true }
 signal-hook = { workspace = true }
 tokio = { workspace = true }
 tokio-stream = { workspace = true }
+krata-tokio-tar = { workspace = true }
 tonic = { workspace = true, features = ["tls"] }
 uuid = { workspace = true }
@@ -1,21 +1,9 @@
 use anyhow::Result;
 use clap::Parser;
 use env_logger::Env;
-use krata::dial::ControlDialAddress;
-use kratad::Daemon;
+use kratad::command::DaemonCommand;
 use log::LevelFilter;
-use std::{
-    str::FromStr,
-    sync::{atomic::AtomicBool, Arc},
-};
-
-#[derive(Parser)]
-struct DaemonCommand {
-    #[arg(short, long, default_value = "unix:///var/lib/krata/daemon.socket")]
-    listen: String,
-    #[arg(short, long, default_value = "/var/lib/krata")]
-    store: String,
-}
+use std::sync::{atomic::AtomicBool, Arc};

 #[tokio::main(flavor = "multi_thread", worker_threads = 10)]
 async fn main() -> Result<()> {
@@ -24,12 +12,8 @@ async fn main() -> Result<()> {
         .init();
     mask_sighup()?;

-    let args = DaemonCommand::parse();
-    let addr = ControlDialAddress::from_str(&args.listen)?;
-
-    let mut daemon = Daemon::new(args.store.clone()).await?;
-    daemon.listen(addr).await?;
-    Ok(())
+    let command = DaemonCommand::parse();
+    command.run().await
 }

 fn mask_sighup() -> Result<()> {
crates/daemon/src/command.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
+use anyhow::Result;
+use clap::{CommandFactory, Parser};
+use krata::dial::ControlDialAddress;
+use std::str::FromStr;
+
+use crate::Daemon;
+
+#[derive(Parser)]
+#[command(version, about = "Krata hypervisor daemon")]
+pub struct DaemonCommand {
+    #[arg(
+        short,
+        long,
+        default_value = "unix:///var/lib/krata/daemon.socket",
+        help = "Listen address"
+    )]
+    listen: String,
+    #[arg(short, long, default_value = "/var/lib/krata", help = "Storage path")]
+    store: String,
+}
+
+impl DaemonCommand {
+    pub async fn run(self) -> Result<()> {
+        let addr = ControlDialAddress::from_str(&self.listen)?;
+        let mut daemon = Daemon::new(self.store.clone()).await?;
+        daemon.listen(addr).await?;
+        Ok(())
+    }
+
+    pub fn version() -> String {
+        DaemonCommand::command()
+            .get_version()
+            .unwrap_or("unknown")
+            .to_string()
+    }
+}
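Taken together with the trimmed-down main.rs above, the daemon binary now reduces to parsing and running this command. A minimal sketch, with logging and signal setup omitted:

    use anyhow::Result;
    use clap::Parser;
    use kratad::command::DaemonCommand;

    #[tokio::main]
    async fn main() -> Result<()> {
        // Parses --listen and --store, then serves the control API until shutdown.
        DaemonCommand::parse().run().await
    }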
@@ -1,6 +1,6 @@
 use std::{collections::HashMap, sync::Arc};

-use anyhow::Result;
+use anyhow::{anyhow, Result};
 use circular_buffer::CircularBuffer;
 use kratart::channel::ChannelService;
 use log::error;
@@ -11,6 +11,9 @@ use tokio::{
     },
     task::JoinHandle,
 };
+use uuid::Uuid;
+
+use crate::glt::GuestLookupTable;

 const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
 type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
@@ -21,6 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;

 #[derive(Clone)]
 pub struct DaemonConsoleHandle {
+    glt: GuestLookupTable,
     listeners: ListenerMap,
     buffers: BufferMap,
     sender: Sender<(u32, Vec<u8>)>,
@@ -50,9 +54,12 @@ impl DaemonConsoleAttachHandle {
 impl DaemonConsoleHandle {
     pub async fn attach(
         &self,
-        domid: u32,
+        uuid: Uuid,
         sender: Sender<Vec<u8>>,
     ) -> Result<DaemonConsoleAttachHandle> {
+        let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
+            return Err(anyhow!("unable to find domain {}", uuid));
+        };
         let buffers = self.buffers.lock().await;
         let buffer = buffers.get(&domid).map(|x| x.to_vec()).unwrap_or_default();
         drop(buffers);
@@ -77,6 +84,7 @@ impl Drop for DaemonConsoleHandle {
 }

 pub struct DaemonConsole {
+    glt: GuestLookupTable,
     listeners: ListenerMap,
     buffers: BufferMap,
     receiver: Receiver<(u32, Option<Vec<u8>>)>,
@@ -85,13 +93,14 @@ pub struct DaemonConsole {
 }

 impl DaemonConsole {
-    pub async fn new() -> Result<DaemonConsole> {
+    pub async fn new(glt: GuestLookupTable) -> Result<DaemonConsole> {
         let (service, sender, receiver) =
             ChannelService::new("krata-console".to_string(), Some(0)).await?;
         let task = service.launch().await?;
         let listeners = Arc::new(Mutex::new(HashMap::new()));
         let buffers = Arc::new(Mutex::new(HashMap::new()));
         Ok(DaemonConsole {
+            glt,
             listeners,
             buffers,
             receiver,
@@ -101,6 +110,7 @@ impl DaemonConsole {
     }

     pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
+        let glt = self.glt.clone();
         let listeners = self.listeners.clone();
         let buffers = self.buffers.clone();
         let sender = self.sender.clone();
@@ -110,6 +120,7 @@ impl DaemonConsole {
             }
         });
         Ok(DaemonConsoleHandle {
+            glt,
             listeners,
             buffers,
             sender,
@@ -1,15 +1,17 @@
 use async_stream::try_stream;
 use futures::Stream;
 use krata::{
-    idm::protocol::{
-        idm_request::Request as IdmRequestType, idm_response::Response as IdmResponseType,
-        IdmMetricsRequest,
+    idm::internal::{
+        exec_stream_request_update::Update, request::Request as IdmRequestType,
+        response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
+        ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
     },
     v1::{
-        common::{Guest, GuestOciImageFormat, GuestState, GuestStatus},
+        common::{Guest, GuestState, GuestStatus, OciImageFormat},
         control::{
             control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
             CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
+            ExecGuestReply, ExecGuestRequest, IdentifyHostReply, IdentifyHostRequest,
             ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
             ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
             SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
@@ -18,7 +20,7 @@ use krata::{
 };
 use krataoci::{
     name::ImageName,
-    packer::{service::OciPackerService, OciImagePacked, OciPackedFormat},
+    packer::{service::OciPackerService, OciPackedFormat, OciPackedImage},
     progress::{OciProgress, OciProgressContext},
 };
 use std::{pin::Pin, str::FromStr};
@@ -32,7 +34,8 @@ use tonic::{Request, Response, Status, Streaming};
 use uuid::Uuid;

 use crate::{
-    console::DaemonConsoleHandle, db::GuestStore, event::DaemonEventContext, idm::DaemonIdmHandle,
+    command::DaemonCommand, console::DaemonConsoleHandle, db::GuestStore,
+    event::DaemonEventContext, glt::GuestLookupTable, idm::DaemonIdmHandle,
     metrics::idm_metric_to_api, oci::convert_oci_progress,
 };

@@ -56,6 +59,7 @@ impl From<ApiError> for Status {

 #[derive(Clone)]
 pub struct DaemonControlService {
+    glt: GuestLookupTable,
     events: DaemonEventContext,
     console: DaemonConsoleHandle,
     idm: DaemonIdmHandle,
@@ -66,6 +70,7 @@ pub struct DaemonControlService {

 impl DaemonControlService {
     pub fn new(
+        glt: GuestLookupTable,
         events: DaemonEventContext,
         console: DaemonConsoleHandle,
         idm: DaemonIdmHandle,
@@ -74,6 +79,7 @@ impl DaemonControlService {
         packer: OciPackerService,
     ) -> Self {
         Self {
+            glt,
             events,
             console,
             idm,
@@ -90,12 +96,15 @@ enum ConsoleDataSelect {
 }

 enum PullImageSelect {
-    Progress(usize),
-    Completed(Result<Result<OciImagePacked, anyhow::Error>, JoinError>),
+    Progress(Option<OciProgress>),
+    Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
 }

 #[tonic::async_trait]
 impl ControlService for DaemonControlService {
+    type ExecGuestStream =
+        Pin<Box<dyn Stream<Item = Result<ExecGuestReply, Status>> + Send + 'static>>;
+
     type ConsoleDataStream =
         Pin<Box<dyn Stream<Item = Result<ConsoleDataReply, Status>> + Send + 'static>>;

@@ -108,6 +117,18 @@ impl ControlService for DaemonControlService {
     type SnoopIdmStream =
         Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;

+    async fn identify_host(
+        &self,
+        request: Request<IdentifyHostRequest>,
+    ) -> Result<Response<IdentifyHostReply>, Status> {
+        let _ = request.into_inner();
+        Ok(Response::new(IdentifyHostReply {
+            host_domid: self.glt.host_domid(),
+            host_uuid: self.glt.host_uuid().to_string(),
+            krata_version: DaemonCommand::version(),
+        }))
+    }
+
     async fn create_guest(
         &self,
         request: Request<CreateGuestRequest>,
@@ -130,6 +151,7 @@ impl ControlService for DaemonControlService {
                 network: None,
                 exit_info: None,
                 error_info: None,
+                host: self.glt.host_uuid().to_string(),
                 domid: u32::MAX,
             }),
             spec: Some(spec),
@@ -148,6 +170,98 @@ impl ControlService for DaemonControlService {
         }))
     }

+    async fn exec_guest(
+        &self,
+        request: Request<Streaming<ExecGuestRequest>>,
+    ) -> Result<Response<Self::ExecGuestStream>, Status> {
+        let mut input = request.into_inner();
+        let Some(request) = input.next().await else {
+            return Err(ApiError {
+                message: "expected to have at least one request".to_string(),
+            }
+            .into());
+        };
+        let request = request?;
+
+        let Some(task) = request.task else {
+            return Err(ApiError {
+                message: "task is missing".to_string(),
+            }
+            .into());
+        };
+
+        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+            message: error.to_string(),
+        })?;
+        let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
+            message: error.to_string(),
+        })?;
+
+        let idm_request = IdmRequest {
+            request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
+                update: Some(Update::Start(ExecStreamRequestStart {
+                    environment: task
+                        .environment
+                        .into_iter()
+                        .map(|x| ExecEnvVar {
+                            key: x.key,
+                            value: x.value,
+                        })
+                        .collect(),
+                    command: task.command,
+                    working_directory: task.working_directory,
+                })),
+            })),
+        };
+
+        let output = try_stream! {
+            let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
+                message: x.to_string(),
+            })?;
+
+            loop {
+                select! {
+                    x = input.next() => if let Some(update) = x {
+                        let update: Result<ExecGuestRequest, Status> = update.map_err(|error| ApiError {
+                            message: error.to_string()
+                        }.into());
+
+                        if let Ok(update) = update {
+                            if !update.data.is_empty() {
+                                let _ = handle.update(IdmRequest {
+                                    request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
+                                        update: Some(Update::Stdin(ExecStreamRequestStdin {
+                                            data: update.data,
+                                        })),
+                                    }))}).await;
+                            }
+                        }
+                    },
+                    x = handle.receiver.recv() => match x {
+                        Some(response) => {
+                            let Some(IdmResponseType::ExecStream(update)) = response.response else {
+                                break;
+                            };
+                            let reply = ExecGuestReply {
+                                exited: update.exited,
+                                error: update.error,
+                                exit_code: update.exit_code,
+                                stdout: update.stdout,
+                                stderr: update.stderr
+                            };
+                            yield reply;
+                        },
+                        None => {
+                            break;
+                        }
+                    }
+                };
+            }
+        };
+
+        Ok(Response::new(Box::pin(output) as Self::ExecGuestStream))
+    }
+
     async fn destroy_guest(
         &self,
         request: Request<DestroyGuestRequest>,
@@ -230,36 +344,10 @@ impl ControlService for DaemonControlService {
         let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
-        let guest = self
-            .guests
-            .read(uuid)
-            .await
-            .map_err(|error| ApiError {
-                message: error.to_string(),
-            })?
-            .ok_or_else(|| ApiError {
-                message: "guest did not exist in the database".to_string(),
-            })?;
-
-        let Some(ref state) = guest.state else {
-            return Err(ApiError {
-                message: "guest did not have state".to_string(),
-            }
-            .into());
-        };
-
-        let domid = state.domid;
-        if domid == 0 {
-            return Err(ApiError {
-                message: "invalid domid on the guest".to_string(),
-            }
-            .into());
-        }
-
         let (sender, mut receiver) = channel(100);
         let console = self
             .console
-            .attach(domid, sender)
+            .attach(uuid, sender)
             .await
             .map_err(|error| ApiError {
                 message: format!("failed to attach to console: {}", error),
@@ -309,45 +397,21 @@ impl ControlService for DaemonControlService {
         let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
-        let guest = self
-            .guests
-            .read(uuid)
-            .await
-            .map_err(|error| ApiError {
-                message: error.to_string(),
-            })?
-            .ok_or_else(|| ApiError {
-                message: "guest did not exist in the database".to_string(),
-            })?;
-
-        let Some(ref state) = guest.state else {
-            return Err(ApiError {
-                message: "guest did not have state".to_string(),
-            }
-            .into());
-        };
-
-        let domid = state.domid;
-        if domid == 0 {
-            return Err(ApiError {
-                message: "invalid domid on the guest".to_string(),
-            }
-            .into());
-        }
-
-        let client = self.idm.client(domid).await.map_err(|error| ApiError {
+        let client = self.idm.client(uuid).await.map_err(|error| ApiError {
             message: error.to_string(),
         })?;

         let response = client
-            .send(IdmRequestType::Metrics(IdmMetricsRequest {}))
+            .send(IdmRequest {
+                request: Some(IdmRequestType::Metrics(MetricsRequest {})),
+            })
             .await
             .map_err(|error| ApiError {
                 message: error.to_string(),
             })?;

         let mut reply = ReadGuestMetricsReply::default();
-        if let IdmResponseType::Metrics(metrics) = response {
+        if let Some(IdmResponseType::Metrics(metrics)) = response.response {
             reply.root = metrics.root.map(idm_metric_to_api);
         }
         Ok(Response::new(reply))
@@ -362,36 +426,39 @@ impl ControlService for DaemonControlService {
             message: err.to_string(),
         })?;
         let format = match request.format() {
-            GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
-            GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
-            GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
+            OciImageFormat::Unknown => OciPackedFormat::Squashfs,
+            OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
+            OciImageFormat::Erofs => OciPackedFormat::Erofs,
+            OciImageFormat::Tar => OciPackedFormat::Tar,
         };
-        let (sender, mut receiver) = channel::<OciProgress>(100);
-        let context = OciProgressContext::new(sender);
+        let (context, mut receiver) = OciProgressContext::create();

         let our_packer = self.packer.clone();

         let output = try_stream! {
             let mut task = tokio::task::spawn(async move {
-                our_packer.request(name, format, context).await
+                our_packer.request(name, format, request.overwrite_cache, context).await
             });
+            let abort_handle = task.abort_handle();
+            let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
+                handle.abort();
+            });

             loop {
-                let mut progresses = Vec::new();
                 let what = select! {
-                    x = receiver.recv_many(&mut progresses, 10) => PullImageSelect::Progress(x),
+                    x = receiver.changed() => match x {
+                        Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
+                        Err(_) => PullImageSelect::Progress(None),
+                    },
                     x = &mut task => PullImageSelect::Completed(x),
                 };
                 match what {
-                    PullImageSelect::Progress(count) => {
-                        if count > 0 {
-                            let progress = progresses.remove(progresses.len() - 1);
-                            let reply = PullImageReply {
-                                progress: Some(convert_oci_progress(progress)),
-                                digest: String::new(),
-                                format: GuestOciImageFormat::Unknown.into(),
-                            };
-                            yield reply;
-                        }
+                    PullImageSelect::Progress(Some(progress)) => {
+                        let reply = PullImageReply {
+                            progress: Some(convert_oci_progress(progress)),
+                            digest: String::new(),
+                            format: OciImageFormat::Unknown.into(),
+                        };
+                        yield reply;
                     },

                     PullImageSelect::Completed(result) => {
@@ -405,13 +472,18 @@ impl ControlService for DaemonControlService {
                             progress: None,
                             digest: packed.digest,
                             format: match packed.format {
-                                OciPackedFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
-                                OciPackedFormat::Erofs => GuestOciImageFormat::Erofs.into(),
+                                OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
+                                OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
+                                OciPackedFormat::Tar => OciImageFormat::Tar.into(),
                             },
                         };
                         yield reply;
                         break;
                     },
+
+                    _ => {
+                        continue;
+                    }
                 }
             }
         };
@@ -438,9 +510,16 @@ impl ControlService for DaemonControlService {
     ) -> Result<Response<Self::SnoopIdmStream>, Status> {
         let _ = request.into_inner();
         let mut messages = self.idm.snoop();
+        let glt = self.glt.clone();
         let output = try_stream! {
             while let Ok(event) = messages.recv().await {
-                yield SnoopIdmReply { from: event.from, to: event.to, packet: Some(event.packet) };
+                let Some(from_uuid) = glt.lookup_uuid_by_domid(event.from).await else {
+                    continue;
+                };
+                let Some(to_uuid) = glt.lookup_uuid_by_domid(event.to).await else {
+                    continue;
+                };
+                yield SnoopIdmReply { from: from_uuid.to_string(), to: to_uuid.to_string(), packet: Some(event.packet) };
             }
         };
         Ok(Response::new(Box::pin(output) as Self::SnoopIdmStream))
@@ -6,7 +6,7 @@ use std::{

 use anyhow::Result;
 use krata::{
-    idm::protocol::{idm_event::Event, IdmEvent},
+    idm::{internal::event::Event as EventType, internal::Event},
     v1::common::{GuestExitInfo, GuestState, GuestStatus},
 };
 use log::{error, warn};
@@ -50,8 +50,8 @@ pub struct DaemonEventGenerator {
     feed: broadcast::Receiver<DaemonEvent>,
     idm: DaemonIdmHandle,
     idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
-    idm_sender: Sender<(u32, IdmEvent)>,
-    idm_receiver: Receiver<(u32, IdmEvent)>,
+    idm_sender: Sender<(u32, Event)>,
+    idm_receiver: Receiver<(u32, Event)>,
     _event_sender: broadcast::Sender<DaemonEvent>,
 }

@@ -93,7 +93,7 @@ impl DaemonEventGenerator {
         match status {
             GuestStatus::Started => {
                 if let Entry::Vacant(e) = self.idms.entry(domid) {
-                    let client = self.idm.client(domid).await?;
+                    let client = self.idm.client_by_domid(domid).await?;
                     let mut receiver = client.subscribe().await?;
                     let sender = self.idm_sender.clone();
                     let task = tokio::task::spawn(async move {
@@ -122,9 +122,9 @@ impl DaemonEventGenerator {
         Ok(())
     }

-    async fn handle_idm_event(&mut self, id: Uuid, event: IdmEvent) -> Result<()> {
+    async fn handle_idm_event(&mut self, id: Uuid, event: Event) -> Result<()> {
         match event.event {
-            Some(Event::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
+            Some(EventType::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
             None => Ok(()),
         }
     }
@@ -136,6 +136,7 @@ impl DaemonEventGenerator {
             network: guest.state.clone().unwrap_or_default().network,
             exit_info: Some(GuestExitInfo { code }),
             error_info: None,
+            host: guest.state.clone().map(|x| x.host).unwrap_or_default(),
             domid: guest.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
         });
crates/daemon/src/glt.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
+use std::{collections::HashMap, sync::Arc};
+
+use tokio::sync::RwLock;
+use uuid::Uuid;
+
+struct GuestLookupTableState {
+    domid_to_uuid: HashMap<u32, Uuid>,
+    uuid_to_domid: HashMap<Uuid, u32>,
+}
+
+impl GuestLookupTableState {
+    pub fn new(host_uuid: Uuid) -> Self {
+        let mut domid_to_uuid = HashMap::new();
+        let mut uuid_to_domid = HashMap::new();
+        domid_to_uuid.insert(0, host_uuid);
+        uuid_to_domid.insert(host_uuid, 0);
+        GuestLookupTableState {
+            domid_to_uuid,
+            uuid_to_domid,
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct GuestLookupTable {
+    host_domid: u32,
+    host_uuid: Uuid,
+    state: Arc<RwLock<GuestLookupTableState>>,
+}
+
+impl GuestLookupTable {
+    pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
+        GuestLookupTable {
+            host_domid,
+            host_uuid,
+            state: Arc::new(RwLock::new(GuestLookupTableState::new(host_uuid))),
+        }
+    }
+
+    pub fn host_uuid(&self) -> Uuid {
+        self.host_uuid
+    }
+
+    pub fn host_domid(&self) -> u32 {
+        self.host_domid
+    }
+
+    pub async fn lookup_uuid_by_domid(&self, domid: u32) -> Option<Uuid> {
+        let state = self.state.read().await;
+        state.domid_to_uuid.get(&domid).cloned()
+    }
+
+    pub async fn lookup_domid_by_uuid(&self, uuid: &Uuid) -> Option<u32> {
+        let state = self.state.read().await;
+        state.uuid_to_domid.get(uuid).cloned()
+    }
+
+    pub async fn associate(&self, uuid: Uuid, domid: u32) {
+        let mut state = self.state.write().await;
+        state.uuid_to_domid.insert(uuid, domid);
+        state.domid_to_uuid.insert(domid, uuid);
+    }
+
+    pub async fn remove(&self, uuid: Uuid, domid: u32) {
+        let mut state = self.state.write().await;
+        state.uuid_to_domid.remove(&uuid);
+        state.domid_to_uuid.remove(&domid);
+    }
+}
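The guest lookup table is what lets the daemon speak in guest UUIDs at the API surface while the Xen runtime still works in domids. A small illustrative sketch of the mapping behaviour, using the API exactly as defined above (the UUIDs and domid here are arbitrary examples):

    use kratad::glt::GuestLookupTable;
    use uuid::Uuid;

    #[tokio::main]
    async fn main() {
        // Domain 0 is pre-associated with the host UUID by the constructor.
        let host_uuid = Uuid::new_v4();
        let glt = GuestLookupTable::new(0, host_uuid);
        assert_eq!(glt.lookup_uuid_by_domid(0).await, Some(host_uuid));

        // A started guest is associated, and removed again when it is destroyed.
        let guest = Uuid::new_v4();
        glt.associate(guest, 7).await;
        assert_eq!(glt.lookup_domid_by_uuid(&guest).await, Some(7));
        glt.remove(guest, 7).await;
        assert_eq!(glt.lookup_domid_by_uuid(&guest).await, None);
    }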
@@ -6,8 +6,9 @@
 use anyhow::{anyhow, Result};
 use bytes::{Buf, BytesMut};
 use krata::idm::{
-    client::{IdmBackend, IdmClient},
-    protocol::IdmPacket,
+    client::{IdmBackend, IdmInternalClient},
+    internal::INTERNAL_IDM_CHANNEL,
+    transport::IdmTransportPacket,
 };
 use kratart::channel::ChannelService;
 use log::{error, warn};
@@ -21,15 +22,19 @@ use tokio::{
     },
     task::JoinHandle,
 };
+use uuid::Uuid;
+
+use crate::glt::GuestLookupTable;

-type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmPacket>>>>;
-type ClientMap = Arc<Mutex<HashMap<u32, IdmClient>>>;
+type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
+type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;

 #[derive(Clone)]
 pub struct DaemonIdmHandle {
+    glt: GuestLookupTable,
     clients: ClientMap,
     feeds: BackendFeedMap,
-    tx_sender: Sender<(u32, IdmPacket)>,
+    tx_sender: Sender<(u32, IdmTransportPacket)>,
     task: Arc<JoinHandle<()>>,
     snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
 }
@@ -39,7 +44,14 @@ impl DaemonIdmHandle {
         self.snoop_sender.subscribe()
     }

-    pub async fn client(&self, domid: u32) -> Result<IdmClient> {
+    pub async fn client(&self, uuid: Uuid) -> Result<IdmInternalClient> {
+        let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
+            return Err(anyhow!("unable to find domain {}", uuid));
+        };
+        self.client_by_domid(domid).await
+    }
+
+    pub async fn client_by_domid(&self, domid: u32) -> Result<IdmInternalClient> {
         client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await
     }
 }
@@ -56,22 +68,23 @@ impl Drop for DaemonIdmHandle {
 pub struct DaemonIdmSnoopPacket {
     pub from: u32,
     pub to: u32,
-    pub packet: IdmPacket,
+    pub packet: IdmTransportPacket,
 }

 pub struct DaemonIdm {
+    glt: GuestLookupTable,
     clients: ClientMap,
     feeds: BackendFeedMap,
-    tx_sender: Sender<(u32, IdmPacket)>,
+    tx_sender: Sender<(u32, IdmTransportPacket)>,
     tx_raw_sender: Sender<(u32, Vec<u8>)>,
-    tx_receiver: Receiver<(u32, IdmPacket)>,
+    tx_receiver: Receiver<(u32, IdmTransportPacket)>,
     rx_receiver: Receiver<(u32, Option<Vec<u8>>)>,
     snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
     task: JoinHandle<()>,
 }

 impl DaemonIdm {
-    pub async fn new() -> Result<DaemonIdm> {
+    pub async fn new(glt: GuestLookupTable) -> Result<DaemonIdm> {
         let (service, tx_raw_sender, rx_receiver) =
             ChannelService::new("krata-channel".to_string(), None).await?;
         let (tx_sender, tx_receiver) = channel(100);
@@ -80,6 +93,7 @@ impl DaemonIdm {
         let clients = Arc::new(Mutex::new(HashMap::new()));
         let feeds = Arc::new(Mutex::new(HashMap::new()));
         Ok(DaemonIdm {
+            glt,
             rx_receiver,
             tx_receiver,
             tx_sender,
@@ -92,6 +106,7 @@ impl DaemonIdm {
     }

     pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
+        let glt = self.glt.clone();
         let clients = self.clients.clone();
         let feeds = self.feeds.clone();
         let tx_sender = self.tx_sender.clone();
@@ -104,6 +119,7 @@ impl DaemonIdm {
             }
         });
         Ok(DaemonIdmHandle {
+            glt,
             clients,
             feeds,
             tx_sender,
@@ -136,7 +152,7 @@ impl DaemonIdm {
                 }
                 let mut packet = buffer.split_to(needed);
                 packet.advance(6);
-                match IdmPacket::decode(packet) {
+                match IdmTransportPacket::decode(packet) {
                     Ok(packet) => {
                         let _ = client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await?;
                         let guard = self.feeds.lock().await;
@@ -196,10 +212,10 @@ impl Drop for DaemonIdm {

 async fn client_or_create(
     domid: u32,
-    tx_sender: &Sender<(u32, IdmPacket)>,
+    tx_sender: &Sender<(u32, IdmTransportPacket)>,
     clients: &ClientMap,
     feeds: &BackendFeedMap,
-) -> Result<IdmClient> {
+) -> Result<IdmInternalClient> {
     let mut clients = clients.lock().await;
     let mut feeds = feeds.lock().await;
     match clients.entry(domid) {
@@ -212,7 +228,11 @@ async fn client_or_create(
                 rx_receiver,
                 tx_sender: tx_sender.clone(),
             };
-            let client = IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await?;
+            let client = IdmInternalClient::new(
+                INTERNAL_IDM_CHANNEL,
+                Box::new(backend) as Box<dyn IdmBackend>,
+            )
+            .await?;
             entry.insert(client.clone());
             Ok(client)
         }
@@ -221,13 +241,13 @@ async fn client_or_create(

 pub struct IdmDaemonBackend {
     domid: u32,
-    rx_receiver: Receiver<IdmPacket>,
-    tx_sender: Sender<(u32, IdmPacket)>,
+    rx_receiver: Receiver<IdmTransportPacket>,
+    tx_sender: Sender<(u32, IdmTransportPacket)>,
 }

 #[async_trait::async_trait]
 impl IdmBackend for IdmDaemonBackend {
-    async fn recv(&mut self) -> Result<IdmPacket> {
+    async fn recv(&mut self) -> Result<IdmTransportPacket> {
         if let Some(packet) = self.rx_receiver.recv().await {
             Ok(packet)
         } else {
@@ -235,7 +255,7 @@ impl IdmBackend for IdmDaemonBackend {
         }
     }

-    async fn send(&mut self, packet: IdmPacket) -> Result<()> {
+    async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
         self.tx_sender.send((self.domid, packet)).await?;
         Ok(())
     }
@@ -1,10 +1,11 @@
 use std::{net::SocketAddr, path::PathBuf, str::FromStr};

-use anyhow::Result;
+use anyhow::{anyhow, Result};
 use console::{DaemonConsole, DaemonConsoleHandle};
 use control::DaemonControlService;
 use db::GuestStore;
 use event::{DaemonEventContext, DaemonEventGenerator};
+use glt::GuestLookupTable;
 use idm::{DaemonIdm, DaemonIdmHandle};
 use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
 use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
@@ -21,10 +22,12 @@ use tokio_stream::wrappers::UnixListenerStream;
 use tonic::transport::{Identity, Server, ServerTlsConfig};
 use uuid::Uuid;

+pub mod command;
 pub mod console;
 pub mod control;
 pub mod db;
 pub mod event;
+pub mod glt;
 pub mod idm;
 pub mod metrics;
 pub mod oci;
@@ -32,6 +35,7 @@ pub mod reconcile;

 pub struct Daemon {
     store: String,
+    glt: GuestLookupTable,
     guests: GuestStore,
     events: DaemonEventContext,
     guest_reconciler_task: JoinHandle<()>,
@@ -51,27 +55,52 @@ impl Daemon {
         image_cache_dir.push("image");
         fs::create_dir_all(&image_cache_dir).await?;

-        let packer = OciPackerService::new(None, &image_cache_dir, OciPlatform::current())?;
+        let mut host_uuid_path = PathBuf::from(store.clone());
+        host_uuid_path.push("host.uuid");
+        let host_uuid = if host_uuid_path.is_file() {
+            let content = fs::read_to_string(&host_uuid_path).await?;
+            Uuid::from_str(content.trim()).ok()
+        } else {
+            None
+        };

-        let runtime = Runtime::new(store.clone()).await?;
+        let host_uuid = if let Some(host_uuid) = host_uuid {
+            host_uuid
+        } else {
+            let generated = Uuid::new_v4();
+            let mut string = generated.to_string();
+            string.push('\n');
+            fs::write(&host_uuid_path, string).await?;
+            generated
+        };
+
+        let initrd_path = detect_guest_file(&store, "initrd")?;
+        let kernel_path = detect_guest_file(&store, "kernel")?;
+
+        let packer = OciPackerService::new(None, &image_cache_dir, OciPlatform::current()).await?;
+        let runtime = Runtime::new().await?;
+        let glt = GuestLookupTable::new(0, host_uuid);
         let guests_db_path = format!("{}/guests.db", store);
         let guests = GuestStore::open(&PathBuf::from(guests_db_path))?;
         let (guest_reconciler_notify, guest_reconciler_receiver) =
             channel::<Uuid>(GUEST_RECONCILER_QUEUE_LEN);
-        let idm = DaemonIdm::new().await?;
+        let idm = DaemonIdm::new(glt.clone()).await?;
         let idm = idm.launch().await?;
-        let console = DaemonConsole::new().await?;
+        let console = DaemonConsole::new(glt.clone()).await?;
         let console = console.launch().await?;
         let (events, generator) =
             DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
                 .await?;
         let runtime_for_reconciler = runtime.dupe().await?;
         let guest_reconciler = GuestReconciler::new(
+            glt.clone(),
             guests.clone(),
             events.clone(),
             runtime_for_reconciler,
             packer.clone(),
             guest_reconciler_notify.clone(),
+            kernel_path,
+            initrd_path,
         )?;

         let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
@@ -79,6 +108,7 @@ impl Daemon {

         Ok(Self {
             store,
+            glt,
             guests,
             events,
             guest_reconciler_task,
@@ -92,6 +122,7 @@ impl Daemon {

     pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
         let control_service = DaemonControlService::new(
+            self.glt.clone(),
             self.events.clone(),
             self.console.clone(),
             self.idm.clone(),
@@ -154,3 +185,16 @@ impl Drop for Daemon {
         self.generator_task.abort();
     }
 }
+
+fn detect_guest_file(store: &str, name: &str) -> Result<PathBuf> {
+    let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
+    if path.is_file() {
+        return Ok(path);
+    }
+
+    path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
+    if path.is_file() {
+        return Ok(path);
+    }
+    Err(anyhow!("unable to find required guest file: {}", name))
+}
@@ -1,18 +1,18 @@
 use krata::{
-    idm::protocol::{IdmMetricFormat, IdmMetricNode},
+    idm::internal::{MetricFormat, MetricNode},
     v1::common::{GuestMetricFormat, GuestMetricNode},
 };

-fn idm_metric_format_to_api(format: IdmMetricFormat) -> GuestMetricFormat {
+fn idm_metric_format_to_api(format: MetricFormat) -> GuestMetricFormat {
     match format {
-        IdmMetricFormat::Unknown => GuestMetricFormat::Unknown,
-        IdmMetricFormat::Bytes => GuestMetricFormat::Bytes,
-        IdmMetricFormat::Integer => GuestMetricFormat::Integer,
-        IdmMetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
+        MetricFormat::Unknown => GuestMetricFormat::Unknown,
+        MetricFormat::Bytes => GuestMetricFormat::Bytes,
+        MetricFormat::Integer => GuestMetricFormat::Integer,
+        MetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
     }
 }

-pub fn idm_metric_to_api(node: IdmMetricNode) -> GuestMetricNode {
+pub fn idm_metric_to_api(node: MetricNode) -> GuestMetricNode {
     let format = node.format();
     GuestMetricNode {
         name: node.name,
@@ -1,33 +1,72 @@
 use krata::v1::control::{
-    PullImageProgress, PullImageProgressLayer, PullImageProgressLayerPhase, PullImageProgressPhase,
+    image_progress_indication::Indication, ImageProgress, ImageProgressIndication,
+    ImageProgressIndicationBar, ImageProgressIndicationCompleted, ImageProgressIndicationHidden,
+    ImageProgressIndicationSpinner, ImageProgressLayer, ImageProgressLayerPhase,
+    ImageProgressPhase,
+};
+use krataoci::progress::{
+    OciProgress, OciProgressIndication, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase,
 };
-use krataoci::progress::{OciProgress, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase};

-fn convert_oci_layer_progress(layer: OciProgressLayer) -> PullImageProgressLayer {
-    PullImageProgressLayer {
-        id: layer.id,
-        phase: match layer.phase {
-            OciProgressLayerPhase::Waiting => PullImageProgressLayerPhase::Waiting,
-            OciProgressLayerPhase::Downloading => PullImageProgressLayerPhase::Downloading,
-            OciProgressLayerPhase::Downloaded => PullImageProgressLayerPhase::Downloaded,
-            OciProgressLayerPhase::Extracting => PullImageProgressLayerPhase::Extracting,
-            OciProgressLayerPhase::Extracted => PullImageProgressLayerPhase::Extracted,
-        }
-        .into(),
-        value: layer.value,
-        total: layer.total,
+fn convert_oci_progress_indication(indication: OciProgressIndication) -> ImageProgressIndication {
+    ImageProgressIndication {
+        indication: Some(match indication {
+            OciProgressIndication::Hidden => Indication::Hidden(ImageProgressIndicationHidden {}),
+            OciProgressIndication::ProgressBar {
+                message,
+                current,
+                total,
+                bytes,
+            } => Indication::Bar(ImageProgressIndicationBar {
+                message: message.unwrap_or_default(),
+                current,
+                total,
+                is_bytes: bytes,
+            }),
+            OciProgressIndication::Spinner { message } => {
+                Indication::Spinner(ImageProgressIndicationSpinner {
+                    message: message.unwrap_or_default(),
+                })
+            }
+            OciProgressIndication::Completed {
+                message,
+                total,
+                bytes,
+            } => Indication::Completed(ImageProgressIndicationCompleted {
+                message: message.unwrap_or_default(),
+                total: total.unwrap_or(0),
+                is_bytes: bytes,
+            }),
+        }),
     }
 }

-pub fn convert_oci_progress(oci: OciProgress) -> PullImageProgress {
-    PullImageProgress {
+fn convert_oci_layer_progress(layer: OciProgressLayer) -> ImageProgressLayer {
+    ImageProgressLayer {
+        id: layer.id,
+        phase: match layer.phase {
+            OciProgressLayerPhase::Waiting => ImageProgressLayerPhase::Waiting,
+            OciProgressLayerPhase::Downloading => ImageProgressLayerPhase::Downloading,
+            OciProgressLayerPhase::Downloaded => ImageProgressLayerPhase::Downloaded,
+            OciProgressLayerPhase::Extracting => ImageProgressLayerPhase::Extracting,
+            OciProgressLayerPhase::Extracted => ImageProgressLayerPhase::Extracted,
+        }
+        .into(),
+        indication: Some(convert_oci_progress_indication(layer.indication)),
+    }
+}
+
+pub fn convert_oci_progress(oci: OciProgress) -> ImageProgress {
+    ImageProgress {
         phase: match oci.phase {
-            OciProgressPhase::Resolving => PullImageProgressPhase::Resolving,
-            OciProgressPhase::Resolved => PullImageProgressPhase::Resolved,
-            OciProgressPhase::ConfigAcquire => PullImageProgressPhase::ConfigAcquire,
-            OciProgressPhase::LayerAcquire => PullImageProgressPhase::LayerAcquire,
-            OciProgressPhase::Packing => PullImageProgressPhase::Packing,
-            OciProgressPhase::Complete => PullImageProgressPhase::Complete,
+            OciProgressPhase::Started => ImageProgressPhase::Started,
+            OciProgressPhase::Resolving => ImageProgressPhase::Resolving,
+            OciProgressPhase::Resolved => ImageProgressPhase::Resolved,
+            OciProgressPhase::ConfigDownload => ImageProgressPhase::ConfigDownload,
+            OciProgressPhase::LayerDownload => ImageProgressPhase::LayerDownload,
+            OciProgressPhase::Assemble => ImageProgressPhase::Assemble,
+            OciProgressPhase::Pack => ImageProgressPhase::Pack,
+            OciProgressPhase::Complete => ImageProgressPhase::Complete,
         }
         .into(),
         layers: oci
@@ -35,7 +74,6 @@ pub fn convert_oci_progress(oci: OciProgress) -> PullImageProgress {
             .into_values()
             .map(convert_oci_layer_progress)
             .collect::<Vec<_>>(),
-        value: oci.value,
-        total: oci.total,
+        indication: Some(convert_oci_progress_indication(oci.indication)),
     }
 }
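convert_oci_progress is the bridge the pull_image handler uses to turn packer-side OciProgress updates into API ImageProgress messages. A minimal sketch of that consumption loop is below, mirroring the control.rs change above; typing the receiver as a tokio watch channel is an assumption about what OciProgressContext::create() returns, inferred from the changed()/borrow_and_update() calls in this diff.

    use krata::v1::control::{OciImageFormat, PullImageReply};
    use krataoci::progress::OciProgress;

    // Hypothetical: forward packer progress to a gRPC reply stream as it changes.
    async fn forward(mut receiver: tokio::sync::watch::Receiver<OciProgress>) {
        while receiver.changed().await.is_ok() {
            let progress = receiver.borrow_and_update().clone();
            let reply = PullImageReply {
                progress: Some(convert_oci_progress(progress)),
                digest: String::new(),
                format: OciImageFormat::Unknown.into(),
            };
            // yield `reply` to the client stream here
            let _ = reply;
        }
    }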
@ -1,20 +1,17 @@
|
|||||||
use std::{
|
use std::{
|
||||||
collections::{hash_map::Entry, HashMap},
|
collections::{hash_map::Entry, HashMap},
|
||||||
|
path::PathBuf,
|
||||||
sync::Arc,
|
sync::Arc,
|
||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::Result;
|
||||||
use krata::launchcfg::LaunchPackedFormat;
|
|
||||||
use krata::v1::{
|
use krata::v1::{
|
||||||
common::{
|
common::{Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState, GuestState, GuestStatus},
|
||||||
guest_image_spec::Image, Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState,
|
|
||||||
GuestOciImageFormat, GuestState, GuestStatus,
|
|
||||||
},
|
|
||||||
control::GuestChangedEvent,
|
control::GuestChangedEvent,
|
||||||
};
|
};
|
||||||
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
use krataoci::packer::service::OciPackerService;
|
||||||
use kratart::{launch::GuestLaunchRequest, GuestInfo, Runtime};
|
use kratart::{GuestInfo, Runtime};
|
||||||
use log::{error, info, trace, warn};
|
use log::{error, info, trace, warn};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
select,
|
select,
|
||||||
@@ -30,8 +27,13 @@ use uuid::Uuid;
 use crate::{
     db::GuestStore,
     event::{DaemonEvent, DaemonEventContext},
+    glt::GuestLookupTable,
 };

+use self::start::GuestStarter;
+
+mod start;
+
 const PARALLEL_LIMIT: u32 = 5;

 #[derive(Debug)]
@@ -53,28 +55,38 @@ impl Drop for GuestReconcilerEntry {

 #[derive(Clone)]
 pub struct GuestReconciler {
+    glt: GuestLookupTable,
     guests: GuestStore,
     events: DaemonEventContext,
     runtime: Runtime,
     packer: OciPackerService,
+    kernel_path: PathBuf,
+    initrd_path: PathBuf,
     tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
     guest_reconciler_notify: Sender<Uuid>,
     reconcile_lock: Arc<RwLock<()>>,
 }

 impl GuestReconciler {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
+        glt: GuestLookupTable,
         guests: GuestStore,
         events: DaemonEventContext,
         runtime: Runtime,
         packer: OciPackerService,
         guest_reconciler_notify: Sender<Uuid>,
+        kernel_path: PathBuf,
+        initrd_path: PathBuf,
     ) -> Result<Self> {
         Ok(Self {
+            glt,
             guests,
             events,
             runtime,
             packer,
+            kernel_path,
+            initrd_path,
             tasks: Arc::new(Mutex::new(HashMap::new())),
             guest_reconciler_notify,
             reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
@@ -123,6 +135,23 @@ impl GuestReconciler {
         trace!("reconciling runtime");
         let runtime_guests = self.runtime.list().await?;
         let stored_guests = self.guests.list().await?;
+
+        let non_existent_guests = runtime_guests
+            .iter()
+            .filter(|x| !stored_guests.iter().any(|g| *g.0 == x.uuid))
+            .collect::<Vec<_>>();
+
+        for guest in non_existent_guests {
+            warn!("destroying unknown runtime guest {}", guest.uuid);
+            if let Err(error) = self.runtime.destroy(guest.uuid).await {
+                error!(
+                    "failed to destroy unknown runtime guest {}: {}",
+                    guest.uuid, error
+                );
+            }
+            self.guests.remove(guest.uuid).await?;
+        }
+
         for (uuid, mut stored_guest) in stored_guests {
             let previous_guest = stored_guest.clone();
             let runtime_guest = runtime_guests.iter().find(|x| x.uuid == uuid);
@@ -136,6 +165,7 @@ impl GuestReconciler {
                 }

                 Some(runtime) => {
+                    self.glt.associate(uuid, runtime.domid).await;
                     let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
                     if let Some(code) = runtime.state.exit_code {
                         state.status = GuestStatus::Exited.into();
@@ -224,71 +254,14 @@ impl GuestReconciler {
     }

     async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
-        let Some(ref spec) = guest.spec else {
-            return Err(anyhow!("guest spec not specified"));
-        };
-        let Some(ref image) = spec.image else {
-            return Err(anyhow!("image spec not provided"));
-        };
-        let oci = match image.image {
-            Some(Image::Oci(ref oci)) => oci,
-            None => {
-                return Err(anyhow!("oci spec not specified"));
-            }
-        };
-        let task = spec.task.as_ref().cloned().unwrap_or_default();
-
-        let image = self
-            .packer
-            .recall(
-                &oci.digest,
-                match oci.format() {
-                    GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
-                    GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
-                    GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
-                },
-            )
-            .await?;
-
-        let Some(image) = image else {
-            return Err(anyhow!(
-                "image {} in the requested format did not exist",
-                oci.digest
-            ));
-        };
-
-        let info = self
-            .runtime
-            .launch(GuestLaunchRequest {
-                format: LaunchPackedFormat::Squashfs,
-                uuid: Some(uuid),
-                name: if spec.name.is_empty() {
-                    None
-                } else {
-                    Some(spec.name.clone())
-                },
-                image,
-                vcpus: spec.vcpus,
-                mem: spec.mem,
-                env: task
-                    .environment
-                    .iter()
-                    .map(|x| (x.key.clone(), x.value.clone()))
-                    .collect::<HashMap<_, _>>(),
-                run: empty_vec_optional(task.command.clone()),
-                debug: false,
-            })
-            .await?;
-        info!("started guest {}", uuid);
-        guest.state = Some(GuestState {
-            status: GuestStatus::Started.into(),
-            network: Some(guestinfo_to_networkstate(&info)),
-            exit_info: None,
-            error_info: None,
-            domid: info.domid,
-        });
-        Ok(GuestReconcilerResult::Changed { rerun: false })
+        let starter = GuestStarter {
+            kernel_path: &self.kernel_path,
+            initrd_path: &self.initrd_path,
+            packer: &self.packer,
+            glt: &self.glt,
+            runtime: &self.runtime,
+        };
+        starter.start(uuid, guest).await
     }

     async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
@@ -305,13 +278,20 @@ impl GuestReconciler {
             trace!("failed to destroy runtime guest {}: {}", uuid, error);
         }

+        let domid = guest.state.as_ref().map(|x| x.domid);
+
+        if let Some(domid) = domid {
+            self.glt.remove(uuid, domid).await;
+        }
+
         info!("destroyed guest {}", uuid);
         guest.state = Some(GuestState {
             status: GuestStatus::Destroyed.into(),
             network: None,
             exit_info: None,
             error_info: None,
-            domid: guest.state.as_ref().map(|x| x.domid).unwrap_or(u32::MAX),
+            host: self.glt.host_uuid().to_string(),
+            domid: domid.unwrap_or(u32::MAX),
         });
         Ok(GuestReconcilerResult::Changed { rerun: false })
     }
@@ -356,15 +336,7 @@ impl GuestReconciler {
     }
 }

-fn empty_vec_optional<T>(value: Vec<T>) -> Option<Vec<T>> {
-    if value.is_empty() {
-        None
-    } else {
-        Some(value)
-    }
-}
-
-fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
+pub fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
     GuestNetworkState {
         guest_ipv4: info.guest_ipv4.map(|x| x.to_string()).unwrap_or_default(),
         guest_ipv6: info.guest_ipv6.map(|x| x.to_string()).unwrap_or_default(),
crates/daemon/src/reconcile/guest/start.rs (new file, 182 lines)
@@ -0,0 +1,182 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};

use anyhow::{anyhow, Result};
use futures::StreamExt;
use krata::launchcfg::LaunchPackedFormat;
use krata::v1::common::GuestOciImageSpec;
use krata::v1::common::{guest_image_spec::Image, Guest, GuestState, GuestStatus, OciImageFormat};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::{launch::GuestLaunchRequest, Runtime};
use log::info;

use tokio::fs::{self, File};
use tokio::io::AsyncReadExt;
use tokio_tar::Archive;
use uuid::Uuid;

use crate::{
    glt::GuestLookupTable,
    reconcile::guest::{guestinfo_to_networkstate, GuestReconcilerResult},
};

// if a kernel is >= 100MB, that's kinda scary.
const OCI_SPEC_TAR_FILE_MAX_SIZE: usize = 100 * 1024 * 1024;

pub struct GuestStarter<'a> {
    pub kernel_path: &'a Path,
    pub initrd_path: &'a Path,
    pub packer: &'a OciPackerService,
    pub glt: &'a GuestLookupTable,
    pub runtime: &'a Runtime,
}

impl GuestStarter<'_> {
    pub async fn oci_spec_tar_read_file(
        &self,
        file: &Path,
        oci: &GuestOciImageSpec,
    ) -> Result<Vec<u8>> {
        if oci.format() != OciImageFormat::Tar {
            return Err(anyhow!(
                "oci image spec for {} is required to be in tar format",
                oci.digest
            ));
        }

        let image = self
            .packer
            .recall(&oci.digest, OciPackedFormat::Tar)
            .await?;

        let Some(image) = image else {
            return Err(anyhow!("image {} was not found in tar format", oci.digest));
        };

        let mut archive = Archive::new(File::open(&image.path).await?);
        let mut entries = archive.entries()?;
        while let Some(entry) = entries.next().await {
            let mut entry = entry?;
            let path = entry.path()?;
            if entry.header().size()? as usize > OCI_SPEC_TAR_FILE_MAX_SIZE {
                return Err(anyhow!(
                    "file {} in image {} is larger than the size limit",
                    file.to_string_lossy(),
                    oci.digest
                ));
            }
            if path == file {
                let mut buffer = Vec::new();
                entry.read_to_end(&mut buffer).await?;
                return Ok(buffer);
            }
        }
        Err(anyhow!(
            "unable to find file {} in image {}",
            file.to_string_lossy(),
            oci.digest
        ))
    }

    pub async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
        let Some(ref spec) = guest.spec else {
            return Err(anyhow!("guest spec not specified"));
        };

        let Some(ref image) = spec.image else {
            return Err(anyhow!("image spec not provided"));
        };
        let oci = match image.image {
            Some(Image::Oci(ref oci)) => oci,
            None => {
                return Err(anyhow!("oci spec not specified"));
            }
        };
        let task = spec.task.as_ref().cloned().unwrap_or_default();

        let image = self
            .packer
            .recall(
                &oci.digest,
                match oci.format() {
                    OciImageFormat::Unknown => OciPackedFormat::Squashfs,
                    OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
                    OciImageFormat::Erofs => OciPackedFormat::Erofs,
                    OciImageFormat::Tar => {
                        return Err(anyhow!("tar image format is not supported for guests"));
                    }
                },
            )
            .await?;

        let Some(image) = image else {
            return Err(anyhow!(
                "image {} in the requested format did not exist",
                oci.digest
            ));
        };

        let kernel = if let Some(ref spec) = spec.kernel {
            let Some(Image::Oci(ref oci)) = spec.image else {
                return Err(anyhow!("kernel image spec must be an oci image"));
            };
            self.oci_spec_tar_read_file(&PathBuf::from("kernel/image"), oci)
                .await?
        } else {
            fs::read(&self.kernel_path).await?
        };
        let initrd = if let Some(ref spec) = spec.initrd {
            let Some(Image::Oci(ref oci)) = spec.image else {
                return Err(anyhow!("initrd image spec must be an oci image"));
            };
            self.oci_spec_tar_read_file(&PathBuf::from("krata/initrd"), oci)
                .await?
        } else {
            fs::read(&self.initrd_path).await?
        };

        let info = self
            .runtime
            .launch(GuestLaunchRequest {
                format: LaunchPackedFormat::Squashfs,
                uuid: Some(uuid),
                name: if spec.name.is_empty() {
                    None
                } else {
                    Some(spec.name.clone())
                },
                image,
                kernel,
                initrd,
                vcpus: spec.vcpus,
                mem: spec.mem,
                env: task
                    .environment
                    .iter()
                    .map(|x| (x.key.clone(), x.value.clone()))
                    .collect::<HashMap<_, _>>(),
                run: empty_vec_optional(task.command.clone()),
                debug: false,
            })
            .await?;
        self.glt.associate(uuid, info.domid).await;
        info!("started guest {}", uuid);
        guest.state = Some(GuestState {
            status: GuestStatus::Started.into(),
            network: Some(guestinfo_to_networkstate(&info)),
            exit_info: None,
            error_info: None,
            host: self.glt.host_uuid().to_string(),
            domid: info.domid,
        });
        Ok(GuestReconcilerResult::Changed { rerun: false })
    }
}

fn empty_vec_optional<T>(value: Vec<T>) -> Option<Vec<T>> {
    if value.is_empty() {
        None
    } else {
        Some(value)
    }
}
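The kernel and initrd lookups above expect a tar-format OCI image whose layers contain `kernel/image` and `krata/initrd` entries. As a rough illustration only (not part of this change; the `tar` crate and the local file names are assumptions), an archive with that layout could be produced like this:

```rust
use std::fs::File;
use tar::Builder;

fn main() -> std::io::Result<()> {
    // Hypothetical input file names; the entry names inside the archive are the
    // paths GuestStarter reads back out.
    let out = File::create("kernel-image.tar")?;
    let mut builder = Builder::new(out);
    builder.append_path_with_name("vmlinuz", "kernel/image")?;
    builder.append_path_with_name("initrd.img", "krata/initrd")?;
    builder.finish()?;
    Ok(())
}
```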
@@ -14,8 +14,8 @@ cgroups-rs = { workspace = true }
 env_logger = { workspace = true }
 futures = { workspace = true }
 ipnetwork = { workspace = true }
-krata = { path = "../krata", version = "^0.0.9" }
-krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }
+krata = { path = "../krata", version = "^0.0.10" }
+krata-xenstore = { path = "../xen/xenstore", version = "^0.0.10" }
 libc = { workspace = true }
 log = { workspace = true }
 nix = { workspace = true, features = ["ioctl", "process", "fs"] }
@@ -1,15 +1,17 @@
 use crate::{
     childwait::{ChildEvent, ChildWait},
     death,
+    exec::GuestExecTask,
     metrics::MetricsCollector,
 };
 use anyhow::Result;
 use cgroups_rs::Cgroup;
 use krata::idm::{
-    client::IdmClient,
-    protocol::{
-        idm_event::Event, idm_request::Request, idm_response::Response, IdmEvent, IdmExitEvent,
-        IdmMetricsResponse, IdmPingResponse, IdmRequest,
+    client::{IdmClientStreamResponseHandle, IdmInternalClient},
+    internal::{
+        event::Event as EventType, request::Request as RequestType,
+        response::Response as ResponseType, Event, ExecStreamResponseUpdate, ExitEvent,
+        MetricsResponse, PingResponse, Request, Response,
     },
 };
 use log::debug;
@@ -17,14 +19,18 @@ use nix::unistd::Pid;
 use tokio::{select, sync::broadcast};

 pub struct GuestBackground {
-    idm: IdmClient,
+    idm: IdmInternalClient,
     child: Pid,
     _cgroup: Cgroup,
     wait: ChildWait,
 }

 impl GuestBackground {
-    pub async fn new(idm: IdmClient, cgroup: Cgroup, child: Pid) -> Result<GuestBackground> {
+    pub async fn new(
+        idm: IdmInternalClient,
+        cgroup: Cgroup,
+        child: Pid,
+    ) -> Result<GuestBackground> {
         Ok(GuestBackground {
             idm,
             child,
@@ -36,11 +42,11 @@ impl GuestBackground {
     pub async fn run(&mut self) -> Result<()> {
         let mut event_subscription = self.idm.subscribe().await?;
         let mut requests_subscription = self.idm.requests().await?;
+        let mut request_streams_subscription = self.idm.request_streams().await?;
         loop {
             select! {
                 x = event_subscription.recv() => match x {
                     Ok(_event) => {

                     },

                     Err(broadcast::error::RecvError::Closed) => {
@@ -54,8 +60,23 @@ impl GuestBackground {
                 },

                 x = requests_subscription.recv() => match x {
-                    Ok(request) => {
-                        self.handle_idm_request(request).await?;
+                    Ok((id, request)) => {
+                        self.handle_idm_request(id, request).await?;
+                    },
+
+                    Err(broadcast::error::RecvError::Closed) => {
+                        debug!("idm packet channel closed");
+                        break;
+                    },
+
+                    _ => {
+                        continue;
+                    }
+                },
+
+                x = request_streams_subscription.recv() => match x {
+                    Ok(handle) => {
+                        self.handle_idm_stream_request(handle).await?;
                     },

                     Err(broadcast::error::RecvError::Closed) => {
@@ -79,25 +100,56 @@ impl GuestBackground {
         Ok(())
     }

-    async fn handle_idm_request(&mut self, packet: IdmRequest) -> Result<()> {
-        let id = packet.id;
+    async fn handle_idm_request(&mut self, id: u64, packet: Request) -> Result<()> {
         match packet.request {
-            Some(Request::Ping(_)) => {
+            Some(RequestType::Ping(_)) => {
                 self.idm
-                    .respond(id, Response::Ping(IdmPingResponse {}))
+                    .respond(
+                        id,
+                        Response {
+                            response: Some(ResponseType::Ping(PingResponse {})),
+                        },
+                    )
                     .await?;
             }

-            Some(Request::Metrics(_)) => {
+            Some(RequestType::Metrics(_)) => {
                 let metrics = MetricsCollector::new()?;
                 let root = metrics.collect()?;
-                let response = IdmMetricsResponse { root: Some(root) };
-                self.idm.respond(id, Response::Metrics(response)).await?;
+                let response = Response {
+                    response: Some(ResponseType::Metrics(MetricsResponse { root: Some(root) })),
+                };
+                self.idm.respond(id, response).await?;
             }

-            None => {}
+            _ => {}
+        }
+        Ok(())
+    }
+
+    async fn handle_idm_stream_request(
+        &mut self,
+        handle: IdmClientStreamResponseHandle<Request>,
+    ) -> Result<()> {
+        if let Some(RequestType::ExecStream(_)) = &handle.initial.request {
+            tokio::task::spawn(async move {
+                let exec = GuestExecTask { handle };
+                if let Err(error) = exec.run().await {
+                    let _ = exec
+                        .handle
+                        .respond(Response {
+                            response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
+                                exited: true,
+                                error: error.to_string(),
+                                exit_code: -1,
+                                stdout: vec![],
+                                stderr: vec![],
+                            })),
+                        })
+                        .await;
+                }
+            });
         }
         Ok(())
     }
@@ -105,8 +157,8 @@ impl GuestBackground {
     async fn child_event(&mut self, event: ChildEvent) -> Result<()> {
         if event.pid == self.child {
             self.idm
-                .emit(IdmEvent {
-                    event: Some(Event::Exit(IdmExitEvent { code: event.status })),
+                .emit(Event {
+                    event: Some(EventType::Exit(ExitEvent { code: event.status })),
                 })
                 .await?;
             death(event.status).await?;
crates/guest/src/exec.rs (new file, 172 lines)
@@ -0,0 +1,172 @@
use std::{collections::HashMap, process::Stdio};

use anyhow::{anyhow, Result};
use krata::idm::{
    client::IdmClientStreamResponseHandle,
    internal::{
        exec_stream_request_update::Update, request::Request as RequestType,
        ExecStreamResponseUpdate,
    },
    internal::{response::Response as ResponseType, Request, Response},
};
use tokio::{
    io::{AsyncReadExt, AsyncWriteExt},
    join,
    process::Command,
};

pub struct GuestExecTask {
    pub handle: IdmClientStreamResponseHandle<Request>,
}

impl GuestExecTask {
    pub async fn run(&self) -> Result<()> {
        let mut receiver = self.handle.take().await?;

        let Some(ref request) = self.handle.initial.request else {
            return Err(anyhow!("request was empty"));
        };

        let RequestType::ExecStream(update) = request else {
            return Err(anyhow!("request was not an exec update"));
        };

        let Some(Update::Start(ref start)) = update.update else {
            return Err(anyhow!("first request did not contain a start update"));
        };

        let mut cmd = start.command.clone();
        if cmd.is_empty() {
            return Err(anyhow!("command line was empty"));
        }
        let exe = cmd.remove(0);
        let mut env = HashMap::new();
        for entry in &start.environment {
            env.insert(entry.key.clone(), entry.value.clone());
        }

        if !env.contains_key("PATH") {
            env.insert(
                "PATH".to_string(),
                "/bin:/usr/bin:/usr/local/bin".to_string(),
            );
        }

        let dir = if start.working_directory.is_empty() {
            "/".to_string()
        } else {
            start.working_directory.clone()
        };

        let mut child = Command::new(exe)
            .args(cmd)
            .envs(env)
            .current_dir(dir)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .kill_on_drop(true)
            .spawn()
            .map_err(|error| anyhow!("failed to spawn: {}", error))?;

        let mut stdin = child
            .stdin
            .take()
            .ok_or_else(|| anyhow!("stdin was missing"))?;
        let mut stdout = child
            .stdout
            .take()
            .ok_or_else(|| anyhow!("stdout was missing"))?;
        let mut stderr = child
            .stderr
            .take()
            .ok_or_else(|| anyhow!("stderr was missing"))?;

        let stdout_handle = self.handle.clone();
        let stdout_task = tokio::task::spawn(async move {
            let mut stdout_buffer = vec![0u8; 8 * 1024];
            loop {
                let Ok(size) = stdout.read(&mut stdout_buffer).await else {
                    break;
                };
                if size > 0 {
                    let response = Response {
                        response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
                            exited: false,
                            exit_code: 0,
                            error: String::new(),
                            stdout: stdout_buffer[0..size].to_vec(),
                            stderr: vec![],
                        })),
                    };
                    let _ = stdout_handle.respond(response).await;
                } else {
                    break;
                }
            }
        });

        let stderr_handle = self.handle.clone();
        let stderr_task = tokio::task::spawn(async move {
            let mut stderr_buffer = vec![0u8; 8 * 1024];
            loop {
                let Ok(size) = stderr.read(&mut stderr_buffer).await else {
                    break;
                };
                if size > 0 {
                    let response = Response {
                        response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
                            exited: false,
                            exit_code: 0,
                            error: String::new(),
                            stdout: vec![],
                            stderr: stderr_buffer[0..size].to_vec(),
                        })),
                    };
                    let _ = stderr_handle.respond(response).await;
                } else {
                    break;
                }
            }
        });

        let stdin_task = tokio::task::spawn(async move {
            loop {
                let Some(request) = receiver.recv().await else {
                    break;
                };

                let Some(RequestType::ExecStream(update)) = request.request else {
                    continue;
                };

                let Some(Update::Stdin(update)) = update.update else {
                    continue;
                };

                if stdin.write_all(&update.data).await.is_err() {
                    break;
                }
            }
        });

        let exit = child.wait().await?;
        let code = exit.code().unwrap_or(-1);

        let _ = join!(stdout_task, stderr_task);
        stdin_task.abort();

        let response = Response {
            response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
                exited: true,
                exit_code: code,
                error: String::new(),
                stdout: vec![],
                stderr: vec![],
            })),
        };
        self.handle.respond(response).await?;

        Ok(())
    }
}
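The exec task above consumes a stream of `ExecStreamRequestUpdate` messages: the first must carry a `Start` update (command, environment, working directory), and later ones may carry `Stdin` data. A minimal sketch of building that first message with the generated types (assuming the `krata::idm::internal` module layout used in this change; the command and environment values are placeholders):

```rust
use krata::idm::internal::{
    exec_stream_request_update::Update, request::Request as RequestType, ExecEnvVar,
    ExecStreamRequestStart, ExecStreamRequestUpdate, Request,
};

// Builds the initial exec-stream request that GuestExecTask expects to see
// as the first message on the stream.
fn start_request() -> Request {
    Request {
        request: Some(RequestType::ExecStream(ExecStreamRequestUpdate {
            update: Some(Update::Start(ExecStreamRequestStart {
                environment: vec![ExecEnvVar {
                    key: "PATH".to_string(),
                    value: "/bin:/usr/bin".to_string(),
                }],
                command: vec!["echo".to_string(), "hello".to_string()],
                working_directory: "/".to_string(),
            })),
        })),
    }
}
```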
@@ -3,7 +3,8 @@ use cgroups_rs::{Cgroup, CgroupPid};
 use futures::stream::TryStreamExt;
 use ipnetwork::IpNetwork;
 use krata::ethtool::EthtoolHandle;
-use krata::idm::client::IdmClient;
+use krata::idm::client::IdmInternalClient;
+use krata::idm::internal::INTERNAL_IDM_CHANNEL;
 use krata::launchcfg::{LaunchInfo, LaunchNetwork, LaunchPackedFormat};
 use libc::{sethostname, setsid, TIOCSCTTY};
 use log::{trace, warn};
@@ -77,7 +78,7 @@ impl GuestInit {
             Err(error) => warn!("failed to open console: {}", error),
         };

-        let idm = IdmClient::open("/dev/hvc1")
+        let idm = IdmInternalClient::open(INTERNAL_IDM_CHANNEL, "/dev/hvc1")
             .await
             .map_err(|x| anyhow!("failed to open idm client: {}", x))?;
         self.mount_config_image().await?;
@@ -438,7 +439,12 @@ impl GuestInit {
         Ok(())
     }

-    async fn run(&mut self, config: &Config, launch: &LaunchInfo, idm: IdmClient) -> Result<()> {
+    async fn run(
+        &mut self,
+        config: &Config,
+        launch: &LaunchInfo,
+        idm: IdmInternalClient,
+    ) -> Result<()> {
         let mut cmd = match config.cmd() {
             None => vec![],
             Some(value) => value.clone(),
@@ -473,7 +479,7 @@ impl GuestInit {
             env.insert("TERM".to_string(), "xterm".to_string());
         }

-        let path = GuestInit::resolve_executable(&env, path.into())?;
+        let path = resolve_executable(&env, path.into())?;
         let Some(file_name) = path.file_name() else {
             return Err(anyhow!("cannot get file name of command path"));
         };
@@ -531,27 +537,6 @@ impl GuestInit {
         map
     }

-    fn resolve_executable(env: &HashMap<String, String>, path: PathBuf) -> Result<PathBuf> {
-        if path.is_absolute() {
-            return Ok(path);
-        }
-
-        if path.is_file() {
-            return Ok(path.absolutize()?.to_path_buf());
-        }
-
-        if let Some(path_var) = env.get("PATH") {
-            for item in path_var.split(':') {
-                let mut exe_path: PathBuf = item.into();
-                exe_path.push(&path);
-                if exe_path.is_file() {
-                    return Ok(exe_path);
-                }
-            }
-        }
-        Ok(path)
-    }
-
     fn env_list(env: HashMap<String, String>) -> Vec<String> {
         env.iter()
             .map(|(key, value)| format!("{}={}", key, value))
@@ -560,7 +545,7 @@ impl GuestInit {

     async fn fork_and_exec(
         &mut self,
-        idm: IdmClient,
+        idm: IdmInternalClient,
         cgroup: Cgroup,
         working_dir: String,
         path: CString,
@@ -596,9 +581,35 @@ impl GuestInit {
         Ok(())
     }

-    async fn background(&mut self, idm: IdmClient, cgroup: Cgroup, executed: Pid) -> Result<()> {
+    async fn background(
+        &mut self,
+        idm: IdmInternalClient,
+        cgroup: Cgroup,
+        executed: Pid,
+    ) -> Result<()> {
         let mut background = GuestBackground::new(idm, cgroup, executed).await?;
         background.run().await?;
         Ok(())
     }
 }
+
+pub fn resolve_executable(env: &HashMap<String, String>, path: PathBuf) -> Result<PathBuf> {
+    if path.is_absolute() {
+        return Ok(path);
+    }
+
+    if path.is_file() {
+        return Ok(path.absolutize()?.to_path_buf());
+    }
+
+    if let Some(path_var) = env.get("PATH") {
+        for item in path_var.split(':') {
+            let mut exe_path: PathBuf = item.into();
+            exe_path.push(&path);
+            if exe_path.is_file() {
+                return Ok(exe_path);
+            }
+        }
+    }
+    Ok(path)
+}
@@ -6,6 +6,7 @@ use xenstore::{XsdClient, XsdInterface};

 pub mod background;
 pub mod childwait;
+pub mod exec;
 pub mod init;
 pub mod metrics;
@@ -1,7 +1,7 @@
 use std::{ops::Add, path::Path};

 use anyhow::Result;
-use krata::idm::protocol::{IdmMetricFormat, IdmMetricNode};
+use krata::idm::internal::{MetricFormat, MetricNode};
 use sysinfo::Process;

 pub struct MetricsCollector {}
@@ -11,9 +11,9 @@ impl MetricsCollector {
         Ok(MetricsCollector {})
     }

-    pub fn collect(&self) -> Result<IdmMetricNode> {
+    pub fn collect(&self) -> Result<MetricNode> {
         let mut sysinfo = sysinfo::System::new();
-        Ok(IdmMetricNode::structural(
+        Ok(MetricNode::structural(
             "guest",
             vec![
                 self.collect_system(&mut sysinfo)?,
@@ -22,22 +22,22 @@ impl MetricsCollector {
         ))
     }

-    fn collect_system(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
+    fn collect_system(&self, sysinfo: &mut sysinfo::System) -> Result<MetricNode> {
         sysinfo.refresh_memory();
-        Ok(IdmMetricNode::structural(
+        Ok(MetricNode::structural(
             "system",
-            vec![IdmMetricNode::structural(
+            vec![MetricNode::structural(
                 "memory",
                 vec![
-                    IdmMetricNode::value("total", sysinfo.total_memory(), IdmMetricFormat::Bytes),
-                    IdmMetricNode::value("used", sysinfo.used_memory(), IdmMetricFormat::Bytes),
-                    IdmMetricNode::value("free", sysinfo.free_memory(), IdmMetricFormat::Bytes),
+                    MetricNode::value("total", sysinfo.total_memory(), MetricFormat::Bytes),
+                    MetricNode::value("used", sysinfo.used_memory(), MetricFormat::Bytes),
+                    MetricNode::value("free", sysinfo.free_memory(), MetricFormat::Bytes),
                 ],
             )],
         ))
     }

-    fn collect_processes(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
+    fn collect_processes(&self, sysinfo: &mut sysinfo::System) -> Result<MetricNode> {
         sysinfo.refresh_processes();
         let mut processes = Vec::new();
         let mut sysinfo_processes = sysinfo.processes().values().collect::<Vec<_>>();
@@ -48,71 +48,68 @@ impl MetricsCollector {
             }
             processes.push(MetricsCollector::process_node(process)?);
         }
-        Ok(IdmMetricNode::structural("process", processes))
+        Ok(MetricNode::structural("process", processes))
     }

-    fn process_node(process: &Process) -> Result<IdmMetricNode> {
+    fn process_node(process: &Process) -> Result<MetricNode> {
         let mut metrics = vec![];

         if let Some(parent) = process.parent() {
-            metrics.push(IdmMetricNode::value(
+            metrics.push(MetricNode::value(
                 "parent",
                 parent.as_u32() as u64,
-                IdmMetricFormat::Integer,
+                MetricFormat::Integer,
             ));
         }

         if let Some(exe) = process.exe().and_then(path_as_str) {
-            metrics.push(IdmMetricNode::raw_value("executable", exe));
+            metrics.push(MetricNode::raw_value("executable", exe));
         }

         if let Some(working_directory) = process.cwd().and_then(path_as_str) {
-            metrics.push(IdmMetricNode::raw_value("cwd", working_directory));
+            metrics.push(MetricNode::raw_value("cwd", working_directory));
         }

         let cmdline = process.cmd().to_vec();
-        metrics.push(IdmMetricNode::raw_value("cmdline", cmdline));
-        metrics.push(IdmMetricNode::structural(
+        metrics.push(MetricNode::raw_value("cmdline", cmdline));
+        metrics.push(MetricNode::structural(
             "memory",
             vec![
-                IdmMetricNode::value("resident", process.memory(), IdmMetricFormat::Bytes),
-                IdmMetricNode::value("virtual", process.virtual_memory(), IdmMetricFormat::Bytes),
+                MetricNode::value("resident", process.memory(), MetricFormat::Bytes),
+                MetricNode::value("virtual", process.virtual_memory(), MetricFormat::Bytes),
             ],
         ));

-        metrics.push(IdmMetricNode::value(
+        metrics.push(MetricNode::value(
             "lifetime",
             process.run_time(),
-            IdmMetricFormat::DurationSeconds,
+            MetricFormat::DurationSeconds,
         ));
-        metrics.push(IdmMetricNode::value(
+        metrics.push(MetricNode::value(
             "uid",
             process.user_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
-            IdmMetricFormat::Integer,
+            MetricFormat::Integer,
         ));
-        metrics.push(IdmMetricNode::value(
+        metrics.push(MetricNode::value(
             "gid",
             process.group_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
-            IdmMetricFormat::Integer,
+            MetricFormat::Integer,
         ));
-        metrics.push(IdmMetricNode::value(
+        metrics.push(MetricNode::value(
             "euid",
             process
                 .effective_user_id()
                 .map(|x| (*x).add(0))
                 .unwrap_or(0) as f64,
-            IdmMetricFormat::Integer,
+            MetricFormat::Integer,
         ));
-        metrics.push(IdmMetricNode::value(
+        metrics.push(MetricNode::value(
             "egid",
             process.effective_group_id().map(|x| x.add(0)).unwrap_or(0) as f64,
-            IdmMetricFormat::Integer,
+            MetricFormat::Integer,
         ));

-        Ok(IdmMetricNode::structural(
-            process.pid().to_string(),
-            metrics,
-        ))
+        Ok(MetricNode::structural(process.pid().to_string(), metrics))
     }
 }
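A small illustration (not from this diff) of how the renamed helpers compose into the tree the collector returns; the `structural` and `value` constructors are the ones used above, and their exact signatures are assumed from that usage:

```rust
use krata::idm::internal::{MetricFormat, MetricNode};

// Assemble a tiny metrics tree in the same shape as MetricsCollector::collect().
fn example_tree() -> MetricNode {
    MetricNode::structural(
        "guest",
        vec![MetricNode::structural(
            "memory",
            vec![MetricNode::value("total", 1024u64, MetricFormat::Bytes)],
        )],
    )
}
```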
@@ -6,12 +6,20 @@ fn main() -> Result<()> {
         .descriptor_pool("crate::DESCRIPTOR_POOL")
         .configure(
             &mut config,
-            &["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
+            &[
+                "proto/krata/v1/control.proto",
+                "proto/krata/idm/transport.proto",
+                "proto/krata/idm/internal.proto",
+            ],
             &["proto/"],
         )?;
     tonic_build::configure().compile_with_config(
         config,
-        &["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
+        &[
+            "proto/krata/v1/control.proto",
+            "proto/krata/idm/transport.proto",
+            "proto/krata/idm/internal.proto",
+        ],
         &["proto/"],
     )?;
     Ok(())
@@ -1,67 +0,0 @@
-syntax = "proto3";
-
-package krata.bus.idm;
-
-option java_multiple_files = true;
-option java_package = "dev.krata.proto.bus.idm";
-option java_outer_classname = "IdmProto";
-
-import "google/protobuf/struct.proto";
-
-message IdmPacket {
-    oneof content {
-        IdmEvent event = 1;
-        IdmRequest request = 2;
-        IdmResponse response = 3;
-    }
-}
-
-message IdmEvent {
-    oneof event {
-        IdmExitEvent exit = 1;
-    }
-}
-
-message IdmExitEvent {
-    int32 code = 1;
-}
-
-message IdmRequest {
-    uint64 id = 1;
-    oneof request {
-        IdmPingRequest ping = 2;
-        IdmMetricsRequest metrics = 3;
-    }
-}
-
-message IdmPingRequest {}
-
-message IdmMetricsRequest {}
-
-message IdmResponse {
-    uint64 id = 1;
-    oneof response {
-        IdmPingResponse ping = 2;
-        IdmMetricsResponse metrics = 3;
-    }
-}
-
-message IdmPingResponse {}
-
-message IdmMetricsResponse {
-    IdmMetricNode root = 1;
-}
-
-message IdmMetricNode {
-    string name = 1;
-    google.protobuf.Value value = 2;
-    IdmMetricFormat format = 3;
-    repeated IdmMetricNode children = 4;
-}
-
-enum IdmMetricFormat {
-    IDM_METRIC_FORMAT_UNKNOWN = 0;
-    IDM_METRIC_FORMAT_BYTES = 1;
-    IDM_METRIC_FORMAT_INTEGER = 2;
-    IDM_METRIC_FORMAT_DURATION_SECONDS = 3;
-}
crates/krata/proto/krata/idm/internal.proto (new file, 89 lines)
@@ -0,0 +1,89 @@
syntax = "proto3";

package krata.idm.internal;

option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.internal";
option java_outer_classname = "IdmInternalProto";

import "google/protobuf/struct.proto";

message ExitEvent {
    int32 code = 1;
}

message PingRequest {}

message PingResponse {}

message MetricsRequest {}

message MetricsResponse {
    MetricNode root = 1;
}

message MetricNode {
    string name = 1;
    google.protobuf.Value value = 2;
    MetricFormat format = 3;
    repeated MetricNode children = 4;
}

enum MetricFormat {
    METRIC_FORMAT_UNKNOWN = 0;
    METRIC_FORMAT_BYTES = 1;
    METRIC_FORMAT_INTEGER = 2;
    METRIC_FORMAT_DURATION_SECONDS = 3;
}

message ExecEnvVar {
    string key = 1;
    string value = 2;
}

message ExecStreamRequestStart {
    repeated ExecEnvVar environment = 1;
    repeated string command = 2;
    string working_directory = 3;
}

message ExecStreamRequestStdin {
    bytes data = 1;
}

message ExecStreamRequestUpdate {
    oneof update {
        ExecStreamRequestStart start = 1;
        ExecStreamRequestStdin stdin = 2;
    }
}

message ExecStreamResponseUpdate {
    bool exited = 1;
    string error = 2;
    int32 exit_code = 3;
    bytes stdout = 4;
    bytes stderr = 5;
}

message Event {
    oneof event {
        ExitEvent exit = 1;
    }
}

message Request {
    oneof request {
        PingRequest ping = 1;
        MetricsRequest metrics = 2;
        ExecStreamRequestUpdate exec_stream = 3;
    }
}

message Response {
    oneof response {
        PingResponse ping = 1;
        MetricsResponse metrics = 2;
        ExecStreamResponseUpdate exec_stream = 3;
    }
}
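As a rough sketch only (assuming prost codegen into the `krata::idm::internal` module, the layout the rest of this change imports from), the `Request`/`Response` oneofs above surface in Rust as wrapper structs with an optional enum field, which is why the guest background handler earlier matches on `packet.request`:

```rust
use krata::idm::internal::{
    request::Request as RequestType, response::Response as ResponseType, PingRequest,
    PingResponse, Request, Response,
};

// Build a ping request and the response a handler would send back for it.
fn ping_round_trip() -> (Request, Response) {
    let request = Request {
        request: Some(RequestType::Ping(PingRequest {})),
    };
    let response = Response {
        response: Some(ResponseType::Ping(PingResponse {})),
    };
    (request, response)
}
```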
crates/krata/proto/krata/idm/transport.proto (new file, 27 lines)
@@ -0,0 +1,27 @@
syntax = "proto3";

package krata.idm.transport;

option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.transport";
option java_outer_classname = "IdmTransportProto";

message IdmTransportPacket {
    uint64 id = 1;
    uint64 channel = 2;
    IdmTransportPacketForm form = 3;
    bytes data = 4;
}

enum IdmTransportPacketForm {
    IDM_TRANSPORT_PACKET_FORM_UNKNOWN = 0;
    IDM_TRANSPORT_PACKET_FORM_RAW = 1;
    IDM_TRANSPORT_PACKET_FORM_EVENT = 2;
    IDM_TRANSPORT_PACKET_FORM_REQUEST = 3;
    IDM_TRANSPORT_PACKET_FORM_RESPONSE = 4;
    IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST = 5;
    IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_UPDATE = 6;
    IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_UPDATE = 7;
    IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_CLOSED = 8;
    IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_CLOSED = 9;
}
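The `IdmFileBackend` changes further below frame each `IdmTransportPacket` on the channel device as a two-byte `0xff 0xff` marker, a little-endian `u32` length, and then the protobuf bytes. A minimal sketch of producing that framing (assuming `prost::Message` for encoding, as the client code does; the receive side shown later reads it back the same way):

```rust
use prost::Message;

use krata::idm::transport::IdmTransportPacket;

// Encode a packet with the marker + length framing used by IdmFileBackend.
fn frame_packet(packet: &IdmTransportPacket) -> Vec<u8> {
    let data = packet.encode_to_vec();
    let mut framed = Vec::with_capacity(data.len() + 6);
    framed.extend_from_slice(&[0xff, 0xff]);
    framed.extend_from_slice(&(data.len() as u32).to_le_bytes());
    framed.extend_from_slice(&data);
    framed
}
```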
@@ -17,10 +17,14 @@ message Guest {
 message GuestSpec {
     string name = 1;
     GuestImageSpec image = 2;
-    uint32 vcpus = 3;
-    uint64 mem = 4;
-    GuestTaskSpec task = 5;
-    repeated GuestSpecAnnotation annotations = 6;
+    // If not specified, defaults to the daemon default kernel.
+    GuestImageSpec kernel = 3;
+    // If not specified, defaults to the daemon default initrd.
+    GuestImageSpec initrd = 4;
+    uint32 vcpus = 5;
+    uint64 mem = 6;
+    GuestTaskSpec task = 7;
+    repeated GuestSpecAnnotation annotations = 8;
 }

 message GuestImageSpec {
@@ -29,20 +33,23 @@ message GuestImageSpec {
     }
 }

-enum GuestOciImageFormat {
-    GUEST_OCI_IMAGE_FORMAT_UNKNOWN = 0;
-    GUEST_OCI_IMAGE_FORMAT_SQUASHFS = 1;
-    GUEST_OCI_IMAGE_FORMAT_EROFS = 2;
+enum OciImageFormat {
+    OCI_IMAGE_FORMAT_UNKNOWN = 0;
+    OCI_IMAGE_FORMAT_SQUASHFS = 1;
+    OCI_IMAGE_FORMAT_EROFS = 2;
+    // Tar format is not launchable, and is intended for kernel images.
+    OCI_IMAGE_FORMAT_TAR = 3;
 }

 message GuestOciImageSpec {
     string digest = 1;
-    GuestOciImageFormat format = 2;
+    OciImageFormat format = 2;
 }

 message GuestTaskSpec {
     repeated GuestTaskSpecEnvVar environment = 1;
     repeated string command = 2;
+    string working_directory = 3;
 }

 message GuestTaskSpecEnvVar {
@@ -60,7 +67,8 @@ message GuestState {
     GuestNetworkState network = 2;
     GuestExitInfo exit_info = 3;
     GuestErrorInfo error_info = 4;
-    uint32 domid = 5;
+    string host = 5;
+    uint32 domid = 6;
 }

 enum GuestStatus {
@@ -6,15 +6,19 @@ option java_multiple_files = true;
 option java_package = "dev.krata.proto.v1.control";
 option java_outer_classname = "ControlProto";

-import "krata/bus/idm.proto";
+import "krata/idm/transport.proto";
 import "krata/v1/common.proto";

 service ControlService {
+    rpc IdentifyHost(IdentifyHostRequest) returns (IdentifyHostReply);
+
     rpc CreateGuest(CreateGuestRequest) returns (CreateGuestReply);
     rpc DestroyGuest(DestroyGuestRequest) returns (DestroyGuestReply);
     rpc ResolveGuest(ResolveGuestRequest) returns (ResolveGuestReply);
     rpc ListGuests(ListGuestsRequest) returns (ListGuestsReply);

+    rpc ExecGuest(stream ExecGuestRequest) returns (stream ExecGuestReply);
+
     rpc ConsoleData(stream ConsoleDataRequest) returns (stream ConsoleDataReply);
     rpc ReadGuestMetrics(ReadGuestMetricsRequest) returns (ReadGuestMetricsReply);

@@ -24,6 +28,14 @@ service ControlService {
     rpc PullImage(PullImageRequest) returns (stream PullImageReply);
 }

+message IdentifyHostRequest {}
+
+message IdentifyHostReply {
+    string host_uuid = 1;
+    uint32 host_domid = 2;
+    string krata_version = 3;
+}
+
 message CreateGuestRequest {
     krata.v1.common.GuestSpec spec = 1;
 }
@@ -52,6 +64,20 @@ message ListGuestsReply {
     repeated krata.v1.common.Guest guests = 1;
 }

+message ExecGuestRequest {
+    string guest_id = 1;
+    krata.v1.common.GuestTaskSpec task = 2;
+    bytes data = 3;
+}
+
+message ExecGuestReply {
+    bool exited = 1;
+    string error = 2;
+    int32 exit_code = 3;
+    bytes stdout = 4;
+    bytes stderr = 5;
+}
+
 message ConsoleDataRequest {
     string guest_id = 1;
     bytes data = 2;
@@ -84,51 +110,80 @@ message ReadGuestMetricsReply {
 message SnoopIdmRequest {}

 message SnoopIdmReply {
-    uint32 from = 1;
-    uint32 to = 2;
-    krata.bus.idm.IdmPacket packet = 3;
+    string from = 1;
+    string to = 2;
+    krata.idm.transport.IdmTransportPacket packet = 3;
 }

-enum PullImageProgressLayerPhase {
-    PULL_IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
-    PULL_IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
-    PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
-    PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
-    PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
-    PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
+message ImageProgress {
+    ImageProgressPhase phase = 1;
+    repeated ImageProgressLayer layers = 2;
+    ImageProgressIndication indication = 3;
 }

-message PullImageProgressLayer {
+enum ImageProgressPhase {
+    IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
+    IMAGE_PROGRESS_PHASE_STARTED = 1;
+    IMAGE_PROGRESS_PHASE_RESOLVING = 2;
+    IMAGE_PROGRESS_PHASE_RESOLVED = 3;
+    IMAGE_PROGRESS_PHASE_CONFIG_DOWNLOAD = 4;
+    IMAGE_PROGRESS_PHASE_LAYER_DOWNLOAD = 5;
+    IMAGE_PROGRESS_PHASE_ASSEMBLE = 6;
+    IMAGE_PROGRESS_PHASE_PACK = 7;
+    IMAGE_PROGRESS_PHASE_COMPLETE = 8;
+}
+
+message ImageProgressLayer {
     string id = 1;
-    PullImageProgressLayerPhase phase = 2;
-    uint64 value = 3;
-    uint64 total = 4;
+    ImageProgressLayerPhase phase = 2;
+    ImageProgressIndication indication = 3;
 }

-enum PullImageProgressPhase {
-    PULL_IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
-    PULL_IMAGE_PROGRESS_PHASE_RESOLVING = 1;
-    PULL_IMAGE_PROGRESS_PHASE_RESOLVED = 2;
-    PULL_IMAGE_PROGRESS_PHASE_CONFIG_ACQUIRE = 3;
-    PULL_IMAGE_PROGRESS_PHASE_LAYER_ACQUIRE = 4;
-    PULL_IMAGE_PROGRESS_PHASE_PACKING = 5;
-    PULL_IMAGE_PROGRESS_PHASE_COMPLETE = 6;
+enum ImageProgressLayerPhase {
+    IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
+    IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
+    IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
+    IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
+    IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
+    IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
 }

-message PullImageProgress {
-    PullImageProgressPhase phase = 1;
-    repeated PullImageProgressLayer layers = 2;
-    uint64 value = 3;
-    uint64 total = 4;
+message ImageProgressIndication {
+    oneof indication {
+        ImageProgressIndicationBar bar = 1;
+        ImageProgressIndicationSpinner spinner = 2;
+        ImageProgressIndicationHidden hidden = 3;
+        ImageProgressIndicationCompleted completed = 4;
+    }
+}
+
+message ImageProgressIndicationBar {
+    string message = 1;
+    uint64 current = 2;
+    uint64 total = 3;
+    bool is_bytes = 4;
+}
+
+message ImageProgressIndicationSpinner {
+    string message = 1;
+}
+
+message ImageProgressIndicationHidden {}
+
+message ImageProgressIndicationCompleted {
+    string message = 1;
+    uint64 total = 2;
+    bool is_bytes = 3;
 }

 message PullImageRequest {
     string image = 1;
-    krata.v1.common.GuestOciImageFormat format = 2;
+    krata.v1.common.OciImageFormat format = 2;
+    bool overwrite_cache = 3;
 }

 message PullImageReply {
-    PullImageProgress progress = 1;
+    ImageProgress progress = 1;
     string digest = 2;
-    krata.v1.common.GuestOciImageFormat format = 3;
+    krata.v1.common.OciImageFormat format = 3;
 }
@@ -1 +0,0 @@
-pub mod idm;
@@ -8,10 +8,6 @@ use std::{
     time::Duration,
 };
 
-use super::protocol::{
-    idm_packet::Content, idm_request::Request, idm_response::Response, IdmEvent, IdmPacket,
-    IdmRequest, IdmResponse,
-};
 use anyhow::{anyhow, Result};
 use log::{debug, error};
 use nix::sys::termios::{cfmakeraw, tcgetattr, tcsetattr, SetArg};
@@ -22,14 +18,23 @@ use tokio::{
     select,
     sync::{
         broadcast,
-        mpsc::{channel, Receiver, Sender},
+        mpsc::{self, Receiver, Sender},
         oneshot, Mutex,
     },
     task::JoinHandle,
     time::timeout,
 };
 
-type RequestMap = Arc<Mutex<HashMap<u64, oneshot::Sender<IdmResponse>>>>;
+use super::{
+    internal,
+    serialize::{IdmRequest, IdmSerializable},
+    transport::{IdmTransportPacket, IdmTransportPacketForm},
+};
+
+type OneshotRequestMap<R> = Arc<Mutex<HashMap<u64, oneshot::Sender<<R as IdmRequest>::Response>>>>;
+type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
+type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, mpsc::Sender<R>>>>;
+pub type IdmInternalClient = IdmClient<internal::Request, internal::Event>;
 
 const IDM_PACKET_QUEUE_LEN: usize = 100;
 const IDM_REQUEST_TIMEOUT_SECS: u64 = 10;
@@ -37,8 +42,8 @@ const IDM_PACKET_MAX_SIZE: usize = 20 * 1024 * 1024;
 
 #[async_trait::async_trait]
 pub trait IdmBackend: Send {
-    async fn recv(&mut self) -> Result<IdmPacket>;
-    async fn send(&mut self, packet: IdmPacket) -> Result<()>;
+    async fn recv(&mut self) -> Result<IdmTransportPacket>;
+    async fn send(&mut self, packet: IdmTransportPacket) -> Result<()>;
 }
 
 pub struct IdmFileBackend {
@@ -66,30 +71,30 @@ impl IdmFileBackend {
 
 #[async_trait::async_trait]
 impl IdmBackend for IdmFileBackend {
-    async fn recv(&mut self) -> Result<IdmPacket> {
+    async fn recv(&mut self) -> Result<IdmTransportPacket> {
         let mut fd = self.read_fd.lock().await;
         let mut guard = fd.readable_mut().await?;
         let b1 = guard.get_inner_mut().read_u8().await?;
         if b1 != 0xff {
-            return Ok(IdmPacket::default());
+            return Ok(IdmTransportPacket::default());
         }
         let b2 = guard.get_inner_mut().read_u8().await?;
         if b2 != 0xff {
-            return Ok(IdmPacket::default());
+            return Ok(IdmTransportPacket::default());
         }
         let size = guard.get_inner_mut().read_u32_le().await?;
         if size == 0 {
-            return Ok(IdmPacket::default());
+            return Ok(IdmTransportPacket::default());
         }
         let mut buffer = vec![0u8; size as usize];
         guard.get_inner_mut().read_exact(&mut buffer).await?;
-        match IdmPacket::decode(buffer.as_slice()) {
+        match IdmTransportPacket::decode(buffer.as_slice()) {
             Ok(packet) => Ok(packet),
             Err(error) => Err(anyhow!("received invalid idm packet: {}", error)),
         }
     }
 
-    async fn send(&mut self, packet: IdmPacket) -> Result<()> {
+    async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
         let mut file = self.write.lock().await;
         let data = packet.encode_to_vec();
         file.write_all(&[0xff, 0xff]).await?;
@@ -100,16 +105,19 @@ impl IdmBackend for IdmFileBackend {
 }
 
 #[derive(Clone)]
-pub struct IdmClient {
-    request_backend_sender: broadcast::Sender<IdmRequest>,
+pub struct IdmClient<R: IdmRequest, E: IdmSerializable> {
+    channel: u64,
+    request_backend_sender: broadcast::Sender<(u64, R)>,
+    request_stream_backend_sender: broadcast::Sender<IdmClientStreamResponseHandle<R>>,
     next_request_id: Arc<Mutex<u64>>,
-    event_receiver_sender: broadcast::Sender<IdmEvent>,
-    tx_sender: Sender<IdmPacket>,
-    requests: RequestMap,
+    event_receiver_sender: broadcast::Sender<E>,
+    tx_sender: Sender<IdmTransportPacket>,
+    requests: OneshotRequestMap<R>,
+    request_streams: StreamRequestMap<R>,
     task: Arc<JoinHandle<()>>,
 }
 
-impl Drop for IdmClient {
+impl<R: IdmRequest, E: IdmSerializable> Drop for IdmClient<R, E> {
     fn drop(&mut self) {
         if Arc::strong_count(&self.task) <= 1 {
             self.task.abort();
@@ -117,21 +125,122 @@ impl Drop for IdmClient {
         }
     }
 }
 
-impl IdmClient {
-    pub async fn new(backend: Box<dyn IdmBackend>) -> Result<IdmClient> {
+pub struct IdmClientStreamRequestHandle<R: IdmRequest, E: IdmSerializable> {
+    pub id: u64,
+    pub receiver: Receiver<R::Response>,
+    pub client: IdmClient<R, E>,
+}
+
+impl<R: IdmRequest, E: IdmSerializable> IdmClientStreamRequestHandle<R, E> {
+    pub async fn update(&self, request: R) -> Result<()> {
+        self.client
+            .tx_sender
+            .send(IdmTransportPacket {
+                id: self.id,
+                channel: self.client.channel,
+                form: IdmTransportPacketForm::StreamRequestUpdate.into(),
+                data: request.encode()?,
+            })
+            .await?;
+        Ok(())
+    }
+}
+
+impl<R: IdmRequest, E: IdmSerializable> Drop for IdmClientStreamRequestHandle<R, E> {
+    fn drop(&mut self) {
+        let id = self.id;
+        let client = self.client.clone();
+        tokio::task::spawn(async move {
+            let _ = client
+                .tx_sender
+                .send(IdmTransportPacket {
+                    id,
+                    channel: client.channel,
+                    form: IdmTransportPacketForm::StreamRequestClosed.into(),
+                    data: vec![],
+                })
+                .await;
+        });
+    }
+}
+
+#[derive(Clone)]
+pub struct IdmClientStreamResponseHandle<R: IdmRequest> {
+    pub initial: R,
+    pub id: u64,
+    channel: u64,
+    tx_sender: Sender<IdmTransportPacket>,
+    receiver: Arc<Mutex<Option<Receiver<R>>>>,
+}
+
+impl<R: IdmRequest> IdmClientStreamResponseHandle<R> {
+    pub async fn respond(&self, response: R::Response) -> Result<()> {
+        self.tx_sender
+            .send(IdmTransportPacket {
+                id: self.id,
+                channel: self.channel,
+                form: IdmTransportPacketForm::StreamResponseUpdate.into(),
+                data: response.encode()?,
+            })
+            .await?;
+        Ok(())
+    }
+
+    pub async fn take(&self) -> Result<Receiver<R>> {
+        let mut guard = self.receiver.lock().await;
+        let Some(receiver) = (*guard).take() else {
+            return Err(anyhow!("request has already been claimed!"));
+        };
+        Ok(receiver)
+    }
+}
+
+impl<R: IdmRequest> Drop for IdmClientStreamResponseHandle<R> {
+    fn drop(&mut self) {
+        if Arc::strong_count(&self.receiver) <= 1 {
+            let id = self.id;
+            let channel = self.channel;
+            let tx_sender = self.tx_sender.clone();
+            tokio::task::spawn(async move {
+                let _ = tx_sender
+                    .send(IdmTransportPacket {
+                        id,
+                        channel,
+                        form: IdmTransportPacketForm::StreamResponseClosed.into(),
+                        data: vec![],
+                    })
+                    .await;
+            });
+        }
+    }
+}
+
+impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
+    pub async fn new(channel: u64, backend: Box<dyn IdmBackend>) -> Result<Self> {
         let requests = Arc::new(Mutex::new(HashMap::new()));
+        let request_streams = Arc::new(Mutex::new(HashMap::new()));
+        let request_update_streams = Arc::new(Mutex::new(HashMap::new()));
         let (event_sender, event_receiver) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
         let (internal_request_backend_sender, _) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
-        let (tx_sender, tx_receiver) = channel(IDM_PACKET_QUEUE_LEN);
+        let (internal_request_stream_backend_sender, _) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
+        let (tx_sender, tx_receiver) = mpsc::channel(IDM_PACKET_QUEUE_LEN);
         let backend_event_sender = event_sender.clone();
         let request_backend_sender = internal_request_backend_sender.clone();
+        let request_stream_backend_sender = internal_request_stream_backend_sender.clone();
         let requests_for_client = requests.clone();
+        let request_streams_for_client = request_streams.clone();
+        let tx_sender_for_client = tx_sender.clone();
         let task = tokio::task::spawn(async move {
             if let Err(error) = IdmClient::process(
                 backend,
+                channel,
+                tx_sender,
                 backend_event_sender,
                 requests,
+                request_streams,
+                request_update_streams,
                 internal_request_backend_sender,
+                internal_request_stream_backend_sender,
                 event_receiver,
                 tx_receiver,
             )
@@ -141,16 +250,19 @@ impl IdmClient {
             }
         });
         Ok(IdmClient {
+            channel,
             next_request_id: Arc::new(Mutex::new(0)),
             event_receiver_sender: event_sender.clone(),
             request_backend_sender,
+            request_stream_backend_sender,
             requests: requests_for_client,
-            tx_sender,
+            request_streams: request_streams_for_client,
+            tx_sender: tx_sender_for_client,
             task: Arc::new(task),
         })
     }
 
-    pub async fn open<P: AsRef<Path>>(path: P) -> Result<IdmClient> {
+    pub async fn open<P: AsRef<Path>>(channel: u64, path: P) -> Result<Self> {
         let read_file = File::options()
             .read(true)
             .write(false)
@@ -164,39 +276,54 @@ impl IdmClient {
             .open(path)
             .await?;
         let backend = IdmFileBackend::new(read_file, write_file).await?;
-        IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await
+        IdmClient::new(channel, Box::new(backend) as Box<dyn IdmBackend>).await
     }
 
-    pub async fn emit(&self, event: IdmEvent) -> Result<()> {
+    pub async fn emit<T: IdmSerializable>(&self, event: T) -> Result<()> {
+        let id = {
+            let mut guard = self.next_request_id.lock().await;
+            let req = *guard;
+            *guard = req.wrapping_add(1);
+            req
+        };
         self.tx_sender
-            .send(IdmPacket {
-                content: Some(Content::Event(event)),
+            .send(IdmTransportPacket {
+                id,
+                form: IdmTransportPacketForm::Event.into(),
+                channel: self.channel,
+                data: event.encode()?,
             })
             .await?;
         Ok(())
     }
 
-    pub async fn requests(&self) -> Result<broadcast::Receiver<IdmRequest>> {
+    pub async fn requests(&self) -> Result<broadcast::Receiver<(u64, R)>> {
         Ok(self.request_backend_sender.subscribe())
     }
 
-    pub async fn respond(&self, id: u64, response: Response) -> Result<()> {
-        let packet = IdmPacket {
-            content: Some(Content::Response(IdmResponse {
-                id,
-                response: Some(response),
-            })),
+    pub async fn request_streams(
+        &self,
+    ) -> Result<broadcast::Receiver<IdmClientStreamResponseHandle<R>>> {
+        Ok(self.request_stream_backend_sender.subscribe())
+    }
+
+    pub async fn respond<T: IdmSerializable>(&self, id: u64, response: T) -> Result<()> {
+        let packet = IdmTransportPacket {
+            id,
+            form: IdmTransportPacketForm::Response.into(),
+            channel: self.channel,
+            data: response.encode()?,
         };
         self.tx_sender.send(packet).await?;
         Ok(())
     }
 
-    pub async fn subscribe(&self) -> Result<broadcast::Receiver<IdmEvent>> {
+    pub async fn subscribe(&self) -> Result<broadcast::Receiver<E>> {
         Ok(self.event_receiver_sender.subscribe())
     }
 
-    pub async fn send(&self, request: Request) -> Result<Response> {
-        let (sender, receiver) = oneshot::channel::<IdmResponse>();
+    pub async fn send(&self, request: R) -> Result<R::Response> {
+        let (sender, receiver) = oneshot::channel::<R::Response>();
         let req = {
             let mut guard = self.next_request_id.lock().await;
             let req = *guard;
@@ -217,52 +344,135 @@ impl IdmClient {
             });
         });
         self.tx_sender
-            .send(IdmPacket {
-                content: Some(Content::Request(IdmRequest {
-                    id: req,
-                    request: Some(request),
-                })),
+            .send(IdmTransportPacket {
+                id: req,
+                channel: self.channel,
+                form: IdmTransportPacketForm::Request.into(),
+                data: request.encode()?,
             })
             .await?;
 
         let response = timeout(Duration::from_secs(IDM_REQUEST_TIMEOUT_SECS), receiver).await??;
         success.store(true, Ordering::Release);
-        if let Some(response) = response.response {
-            Ok(response)
-        } else {
-            Err(anyhow!("response did not contain any content"))
-        }
+        Ok(response)
     }
 
+    pub async fn send_stream(&self, request: R) -> Result<IdmClientStreamRequestHandle<R, E>> {
+        let (sender, receiver) = mpsc::channel::<R::Response>(100);
+        let req = {
+            let mut guard = self.next_request_id.lock().await;
+            let req = *guard;
+            *guard = req.wrapping_add(1);
+            req
+        };
+        let mut requests = self.request_streams.lock().await;
+        requests.insert(req, sender);
+        drop(requests);
+        self.tx_sender
+            .send(IdmTransportPacket {
+                id: req,
+                channel: self.channel,
+                form: IdmTransportPacketForm::StreamRequest.into(),
+                data: request.encode()?,
+            })
+            .await?;
+        Ok(IdmClientStreamRequestHandle {
+            id: req,
+            receiver,
+            client: self.clone(),
+        })
+    }
+
+    #[allow(clippy::too_many_arguments)]
     async fn process(
         mut backend: Box<dyn IdmBackend>,
-        event_sender: broadcast::Sender<IdmEvent>,
-        requests: RequestMap,
-        request_backend_sender: broadcast::Sender<IdmRequest>,
-        _event_receiver: broadcast::Receiver<IdmEvent>,
-        mut receiver: Receiver<IdmPacket>,
+        channel: u64,
+        tx_sender: Sender<IdmTransportPacket>,
+        event_sender: broadcast::Sender<E>,
+        requests: OneshotRequestMap<R>,
+        request_streams: StreamRequestMap<R>,
+        request_update_streams: StreamRequestUpdateMap<R>,
+        request_backend_sender: broadcast::Sender<(u64, R)>,
+        request_stream_backend_sender: broadcast::Sender<IdmClientStreamResponseHandle<R>>,
+        _event_receiver: broadcast::Receiver<E>,
+        mut receiver: Receiver<IdmTransportPacket>,
     ) -> Result<()> {
         loop {
             select! {
                 x = backend.recv() => match x {
                     Ok(packet) => {
-                        match packet.content {
-                            Some(Content::Event(event)) => {
-                                let _ = event_sender.send(event);
-                            },
-
-                            Some(Content::Request(request)) => {
-                                let _ = request_backend_sender.send(request);
-                            },
-
-                            Some(Content::Response(response)) => {
-                                let mut requests = requests.lock().await;
-                                if let Some(sender) = requests.remove(&response.id) {
-                                    drop(requests);
-                                    let _ = sender.send(response);
-                                }
-                            },
+                        if packet.channel != channel {
+                            continue;
+                        }
+
+                        match packet.form() {
+                            IdmTransportPacketForm::Event => {
+                                if let Ok(event) = E::decode(&packet.data) {
+                                    let _ = event_sender.send(event);
+                                }
+                            },
+
+                            IdmTransportPacketForm::Request => {
+                                if let Ok(request) = R::decode(&packet.data) {
+                                    let _ = request_backend_sender.send((packet.id, request));
+                                }
+                            },
+
+                            IdmTransportPacketForm::Response => {
+                                let mut requests = requests.lock().await;
+                                if let Some(sender) = requests.remove(&packet.id) {
+                                    drop(requests);
+
+                                    if let Ok(response) = R::Response::decode(&packet.data) {
+                                        let _ = sender.send(response);
+                                    }
+                                }
+                            },
+
+                            IdmTransportPacketForm::StreamRequest => {
+                                if let Ok(request) = R::decode(&packet.data) {
+                                    let mut update_streams = request_update_streams.lock().await;
+                                    let (sender, receiver) = mpsc::channel(100);
+                                    update_streams.insert(packet.id, sender.clone());
+                                    let handle = IdmClientStreamResponseHandle {
+                                        initial: request,
+                                        id: packet.id,
+                                        channel,
+                                        tx_sender: tx_sender.clone(),
+                                        receiver: Arc::new(Mutex::new(Some(receiver))),
+                                    };
+                                    let _ = request_stream_backend_sender.send(handle);
+                                }
+                            }
+
+                            IdmTransportPacketForm::StreamRequestUpdate => {
+                                if let Ok(request) = R::decode(&packet.data) {
+                                    let mut update_streams = request_update_streams.lock().await;
+                                    if let Some(stream) = update_streams.get_mut(&packet.id) {
+                                        let _ = stream.try_send(request);
+                                    }
+                                }
+                            }
+
+                            IdmTransportPacketForm::StreamRequestClosed => {
+                                let mut update_streams = request_update_streams.lock().await;
+                                update_streams.remove(&packet.id);
+                            }
+
+                            IdmTransportPacketForm::StreamResponseUpdate => {
+                                let requests = request_streams.lock().await;
+                                if let Some(sender) = requests.get(&packet.id) {
+                                    if let Ok(response) = R::Response::decode(&packet.data) {
+                                        let _ = sender.try_send(response);
+                                    }
+                                }
+                            }
+
+                            IdmTransportPacketForm::StreamResponseClosed => {
+                                let mut requests = request_streams.lock().await;
+                                requests.remove(&packet.id);
+                            }
+
                             _ => {},
                         }
                     },
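Note: the rework above makes the client generic over a request type and an event type. The following is a minimal, hedged sketch of driving it, based only on the signatures visible in this diff; the krata::idm::* module paths are assumed from the file layout, and no concrete internal::Request payload is constructed because its fields are not shown here.

    use anyhow::{anyhow, Result};
    use krata::idm::{
        client::{IdmClient, IdmInternalClient},
        internal::INTERNAL_IDM_CHANNEL,
        serialize::{IdmRequest, IdmSerializable},
    };

    // open() now takes the channel number in addition to the device path.
    async fn open_internal_client(path: &str) -> Result<IdmInternalClient> {
        IdmInternalClient::open(INTERNAL_IDM_CHANNEL, path).await
    }

    // Generic over any request/event pair: issue a streaming request and wait for
    // the first response pushed back on the handle's receiver.
    async fn first_stream_response<R: IdmRequest, E: IdmSerializable>(
        client: &IdmClient<R, E>,
        request: R,
    ) -> Result<R::Response> {
        let mut handle = client.send_stream(request).await?;
        handle
            .receiver
            .recv()
            .await
            .ok_or_else(|| anyhow!("stream closed before a response arrived"))
    }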
@@ -1,26 +1,66 @@
+use anyhow::Result;
+use prost::Message;
 use prost_types::{ListValue, Value};
 
-include!(concat!(env!("OUT_DIR"), "/krata.bus.idm.rs"));
+use super::serialize::{IdmRequest, IdmSerializable};
+
+include!(concat!(env!("OUT_DIR"), "/krata.idm.internal.rs"));
+
+pub const INTERNAL_IDM_CHANNEL: u64 = 0;
+
+impl IdmSerializable for Event {
+    fn encode(&self) -> Result<Vec<u8>> {
+        Ok(self.encode_to_vec())
+    }
+
+    fn decode(bytes: &[u8]) -> Result<Self> {
+        Ok(<Self as prost::Message>::decode(bytes)?)
+    }
+}
+
+impl IdmSerializable for Request {
+    fn encode(&self) -> Result<Vec<u8>> {
+        Ok(self.encode_to_vec())
+    }
+
+    fn decode(bytes: &[u8]) -> Result<Self> {
+        Ok(<Self as prost::Message>::decode(bytes)?)
+    }
+}
+
+impl IdmRequest for Request {
+    type Response = Response;
+}
+
+impl IdmSerializable for Response {
+    fn encode(&self) -> Result<Vec<u8>> {
+        Ok(self.encode_to_vec())
+    }
+
+    fn decode(bytes: &[u8]) -> Result<Self> {
+        Ok(<Self as prost::Message>::decode(bytes)?)
+    }
+}
+
 pub trait AsIdmMetricValue {
     fn as_metric_value(&self) -> Value;
 }
 
-impl IdmMetricNode {
-    pub fn structural<N: AsRef<str>>(name: N, children: Vec<IdmMetricNode>) -> IdmMetricNode {
-        IdmMetricNode {
+impl MetricNode {
+    pub fn structural<N: AsRef<str>>(name: N, children: Vec<MetricNode>) -> MetricNode {
+        MetricNode {
             name: name.as_ref().to_string(),
             value: None,
-            format: IdmMetricFormat::Unknown.into(),
+            format: MetricFormat::Unknown.into(),
             children,
         }
     }
 
-    pub fn raw_value<N: AsRef<str>, V: AsIdmMetricValue>(name: N, value: V) -> IdmMetricNode {
-        IdmMetricNode {
+    pub fn raw_value<N: AsRef<str>, V: AsIdmMetricValue>(name: N, value: V) -> MetricNode {
+        MetricNode {
             name: name.as_ref().to_string(),
             value: Some(value.as_metric_value()),
-            format: IdmMetricFormat::Unknown.into(),
+            format: MetricFormat::Unknown.into(),
             children: vec![],
         }
     }
@@ -28,9 +68,9 @@ impl IdmMetricNode {
     pub fn value<N: AsRef<str>, V: AsIdmMetricValue>(
         name: N,
         value: V,
-        format: IdmMetricFormat,
-    ) -> IdmMetricNode {
-        IdmMetricNode {
+        format: MetricFormat,
+    ) -> MetricNode {
+        MetricNode {
             name: name.as_ref().to_string(),
             value: Some(value.as_metric_value()),
             format: format.into(),
@@ -1,3 +1,5 @@
 #[cfg(unix)]
 pub mod client;
-pub use crate::bus::idm as protocol;
+pub mod internal;
+pub mod serialize;
+pub mod transport;
crates/krata/src/idm/serialize.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
+use anyhow::Result;
+
+pub trait IdmSerializable: Sized + Clone + Send + Sync + 'static {
+    fn decode(bytes: &[u8]) -> Result<Self>;
+    fn encode(&self) -> Result<Vec<u8>>;
+}
+
+pub trait IdmRequest: IdmSerializable {
+    type Response: IdmSerializable;
+}
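For illustration only, a minimal sketch of implementing these two traits for a toy request/response pair; PingRequest and PingResponse are hypothetical types, not part of the crate, and the in-tree implementations use prost messages as shown in the internal module diff above.

    use anyhow::Result;
    use krata::idm::serialize::{IdmRequest, IdmSerializable};

    // Hypothetical single-byte request and response, only to show the trait shape.
    #[derive(Clone)]
    pub struct PingRequest(pub u8);

    #[derive(Clone)]
    pub struct PingResponse(pub u8);

    impl IdmSerializable for PingRequest {
        fn decode(bytes: &[u8]) -> Result<Self> {
            Ok(PingRequest(bytes.first().copied().unwrap_or(0)))
        }

        fn encode(&self) -> Result<Vec<u8>> {
            Ok(vec![self.0])
        }
    }

    impl IdmSerializable for PingResponse {
        fn decode(bytes: &[u8]) -> Result<Self> {
            Ok(PingResponse(bytes.first().copied().unwrap_or(0)))
        }

        fn encode(&self) -> Result<Vec<u8>> {
            Ok(vec![self.0])
        }
    }

    // Tying the request to its response type lets the client know what to decode.
    impl IdmRequest for PingRequest {
        type Response = PingResponse;
    }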
crates/krata/src/idm/transport.rs (new file, 1 line)
@@ -0,0 +1 @@
+include!(concat!(env!("OUT_DIR"), "/krata.idm.transport.rs"));
@@ -1,7 +1,6 @@
 use once_cell::sync::Lazy;
 use prost_reflect::DescriptorPool;
 
-pub mod bus;
 pub mod v1;
 
 pub mod client;
@@ -16,7 +16,7 @@ clap = { workspace = true }
 env_logger = { workspace = true }
 etherparse = { workspace = true }
 futures = { workspace = true }
-krata = { path = "../krata", version = "^0.0.9" }
+krata = { path = "../krata", version = "^0.0.10" }
 krata-advmac = { workspace = true }
 libc = { workspace = true }
 log = { workspace = true }
@@ -5,10 +5,10 @@ use env_logger::Env;
 use krataoci::{
     name::ImageName,
     packer::{service::OciPackerService, OciPackedFormat},
-    progress::{OciProgress, OciProgressContext},
+    progress::OciProgressContext,
     registry::OciPlatform,
 };
-use tokio::{fs, sync::mpsc::channel};
+use tokio::fs;
 
 #[tokio::main]
 async fn main() -> Result<()> {
@@ -22,27 +22,22 @@ async fn main() -> Result<()> {
         fs::create_dir(&cache_dir).await?;
     }
 
-    let (sender, mut receiver) = channel::<OciProgress>(100);
+    let (context, mut receiver) = OciProgressContext::create();
     tokio::task::spawn(async move {
         loop {
-            let mut progresses = Vec::new();
-            let _ = receiver.recv_many(&mut progresses, 100).await;
-            let Some(progress) = progresses.last() else {
-                continue;
-            };
+            if (receiver.changed().await).is_err() {
+                break;
+            }
+            let progress = receiver.borrow_and_update();
             println!("phase {:?}", progress.phase);
             for (id, layer) in &progress.layers {
-                println!(
-                    "{} {:?} {} of {}",
-                    id, layer.phase, layer.value, layer.total
-                )
+                println!("{} {:?} {:?}", id, layer.phase, layer.indication,)
             }
         }
     });
-    let context = OciProgressContext::new(sender);
-    let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current())?;
+    let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current()).await?;
     let packed = service
-        .request(image.clone(), OciPackedFormat::Squashfs, context)
+        .request(image.clone(), OciPackedFormat::Squashfs, false, context)
         .await?;
     println!(
         "generated squashfs of {} to {}",
@@ -1,22 +1,25 @@
-use crate::fetch::{OciImageFetcher, OciImageLayer, OciResolvedImage};
+use crate::fetch::{OciImageFetcher, OciImageLayer, OciImageLayerReader, OciResolvedImage};
 use crate::progress::OciBoundProgress;
+use crate::schema::OciSchema;
 use crate::vfs::{VfsNode, VfsTree};
 use anyhow::{anyhow, Result};
 use log::{debug, trace, warn};
-use oci_spec::image::{ImageConfiguration, ImageManifest};
+use oci_spec::image::{Descriptor, ImageConfiguration, ImageManifest};
 
 use std::path::{Path, PathBuf};
 use std::pin::Pin;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use tokio::fs;
-use tokio::io::AsyncRead;
 use tokio_stream::StreamExt;
 use tokio_tar::{Archive, Entry};
 use uuid::Uuid;
 
 pub struct OciImageAssembled {
     pub digest: String,
-    pub manifest: ImageManifest,
-    pub config: ImageConfiguration,
+    pub descriptor: Descriptor,
+    pub manifest: OciSchema<ImageManifest>,
+    pub config: OciSchema<ImageConfiguration>,
     pub vfs: Arc<VfsTree>,
     pub tmp_dir: Option<PathBuf>,
 }
@@ -33,11 +36,12 @@ impl Drop for OciImageAssembled {
 
 pub struct OciImageAssembler {
     downloader: OciImageFetcher,
-    resolved: OciResolvedImage,
+    resolved: Option<OciResolvedImage>,
     progress: OciBoundProgress,
     work_dir: PathBuf,
     disk_dir: PathBuf,
     tmp_dir: Option<PathBuf>,
+    success: AtomicBool,
 }
 
 impl OciImageAssembler {
@@ -81,11 +85,12 @@ impl OciImageAssembler {
 
         Ok(OciImageAssembler {
             downloader,
-            resolved,
+            resolved: Some(resolved),
             progress,
             work_dir,
             disk_dir: target_dir,
             tmp_dir,
+            success: AtomicBool::new(false),
         })
     }
 
@@ -97,11 +102,11 @@ impl OciImageAssembler {
         self.assemble_with(&layer_dir).await
     }
 
-    async fn assemble_with(self, layer_dir: &Path) -> Result<OciImageAssembled> {
-        let local = self
-            .downloader
-            .download(self.resolved.clone(), layer_dir)
-            .await?;
+    async fn assemble_with(mut self, layer_dir: &Path) -> Result<OciImageAssembled> {
+        let Some(ref resolved) = self.resolved else {
+            return Err(anyhow!("resolved image was not available when expected"));
+        };
+        let local = self.downloader.download(resolved, layer_dir).await?;
         let mut vfs = VfsTree::new();
         for layer in &local.layers {
             debug!(
@@ -110,12 +115,14 @@ impl OciImageAssembler {
             );
             self.progress
                 .update(|progress| {
-                    progress.extracting_layer(&layer.digest, 0, 1);
+                    progress.start_extracting_layer(&layer.digest);
                 })
                 .await;
             debug!("process layer digest={}", &layer.digest,);
             let mut archive = layer.archive().await?;
            let mut entries = archive.entries()?;
+            let mut count = 0u64;
+            let mut size = 0u64;
             while let Some(entry) = entries.next().await {
                 let mut entry = entry?;
                 let path = entry.path()?;
@@ -129,14 +136,21 @@ impl OciImageAssembler {
                     self.process_whiteout_entry(&mut vfs, &entry, name, layer)
                         .await?;
                 } else {
-                    vfs.insert_tar_entry(&entry)?;
-                    self.process_write_entry(&mut vfs, &mut entry, layer)
+                    let reference = vfs.insert_tar_entry(&entry)?;
+                    self.progress
+                        .update(|progress| {
+                            progress.extracting_layer(&layer.digest, &reference.name);
+                        })
+                        .await;
+                    size += self
+                        .process_write_entry(&mut vfs, &mut entry, layer)
                         .await?;
+                    count += 1;
                 }
             }
             self.progress
                 .update(|progress| {
-                    progress.extracted_layer(&layer.digest);
+                    progress.extracted_layer(&layer.digest, count, size);
                 })
                 .await;
         }
@@ -145,19 +159,27 @@ impl OciImageAssembler {
                 fs::remove_file(&layer.path).await?;
             }
         }
-        Ok(OciImageAssembled {
+
+        let Some(resolved) = self.resolved.take() else {
+            return Err(anyhow!("resolved image was not available when expected"));
+        };
+
+        let assembled = OciImageAssembled {
             vfs: Arc::new(vfs),
-            digest: self.resolved.digest,
-            manifest: self.resolved.manifest,
+            descriptor: resolved.descriptor,
+            digest: resolved.digest,
+            manifest: resolved.manifest,
             config: local.config,
-            tmp_dir: self.tmp_dir,
-        })
+            tmp_dir: self.tmp_dir.clone(),
+        };
+        self.success.store(true, Ordering::Release);
+        Ok(assembled)
     }
 
     async fn process_whiteout_entry(
         &self,
         vfs: &mut VfsTree,
-        entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
+        entry: &Entry<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>>,
         name: &str,
         layer: &OciImageLayer,
     ) -> Result<()> {
@@ -198,11 +220,11 @@ impl OciImageAssembler {
     async fn process_write_entry(
         &self,
         vfs: &mut VfsTree,
-        entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
+        entry: &mut Entry<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>>,
         layer: &OciImageLayer,
-    ) -> Result<()> {
+    ) -> Result<u64> {
         if !entry.header().entry_type().is_file() {
-            return Ok(());
+            return Ok(0);
         }
         trace!(
             "unpack entry layer={} path={:?} type={:?}",
@@ -218,7 +240,19 @@ impl OciImageAssembler {
             .await?
             .ok_or(anyhow!("unpack did not return a path"))?;
         vfs.set_disk_path(&entry.path()?, &path)?;
-        Ok(())
+        Ok(entry.header().size()?)
+    }
+}
+
+impl Drop for OciImageAssembler {
+    fn drop(&mut self) {
+        if !self.success.load(Ordering::Acquire) {
+            if let Some(tmp_dir) = self.tmp_dir.clone() {
+                tokio::task::spawn(async move {
+                    let _ = fs::remove_dir_all(tmp_dir).await;
+                });
+            }
+        }
     }
 }
 
@@ -1,4 +1,7 @@
-use crate::progress::{OciBoundProgress, OciProgressPhase};
+use crate::{
+    progress::{OciBoundProgress, OciProgressPhase},
+    schema::OciSchema,
+};
 
 use super::{
     name::ImageName,
@@ -6,6 +9,9 @@ use super::{
 };
 
 use std::{
+    fmt::Debug,
+    io::SeekFrom,
+    os::unix::fs::MetadataExt,
     path::{Path, PathBuf},
     pin::Pin,
 };
@@ -14,12 +20,13 @@ use anyhow::{anyhow, Result};
 use async_compression::tokio::bufread::{GzipDecoder, ZstdDecoder};
 use log::debug;
 use oci_spec::image::{
-    Descriptor, ImageConfiguration, ImageIndex, ImageManifest, MediaType, ToDockerV2S2,
+    Descriptor, DescriptorBuilder, ImageConfiguration, ImageIndex, ImageManifest, MediaType,
+    ToDockerV2S2,
 };
 use serde::de::DeserializeOwned;
 use tokio::{
-    fs::File,
-    io::{AsyncRead, AsyncReadExt, BufReader, BufWriter},
+    fs::{self, File},
+    io::{AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader, BufWriter},
 };
 use tokio_stream::StreamExt;
 use tokio_tar::Archive;
@@ -39,16 +46,43 @@ pub enum OciImageLayerCompression {
 
 #[derive(Clone, Debug)]
 pub struct OciImageLayer {
+    pub metadata: Descriptor,
     pub path: PathBuf,
     pub digest: String,
     pub compression: OciImageLayerCompression,
 }
 
+#[async_trait::async_trait]
+pub trait OciImageLayerReader: AsyncRead + Sync {
+    async fn position(&mut self) -> Result<u64>;
+}
+
+#[async_trait::async_trait]
+impl OciImageLayerReader for BufReader<File> {
+    async fn position(&mut self) -> Result<u64> {
+        Ok(self.seek(SeekFrom::Current(0)).await?)
+    }
+}
+
+#[async_trait::async_trait]
+impl OciImageLayerReader for GzipDecoder<BufReader<File>> {
+    async fn position(&mut self) -> Result<u64> {
+        self.get_mut().position().await
+    }
+}
+
+#[async_trait::async_trait]
+impl OciImageLayerReader for ZstdDecoder<BufReader<File>> {
+    async fn position(&mut self) -> Result<u64> {
+        self.get_mut().position().await
+    }
+}
+
 impl OciImageLayer {
-    pub async fn decompress(&self) -> Result<Pin<Box<dyn AsyncRead + Send>>> {
+    pub async fn decompress(&self) -> Result<Pin<Box<dyn OciImageLayerReader + Send>>> {
         let file = File::open(&self.path).await?;
         let reader = BufReader::new(file);
-        let reader: Pin<Box<dyn AsyncRead + Send>> = match self.compression {
+        let reader: Pin<Box<dyn OciImageLayerReader + Send>> = match self.compression {
             OciImageLayerCompression::None => Box::pin(reader),
             OciImageLayerCompression::Gzip => Box::pin(GzipDecoder::new(reader)),
             OciImageLayerCompression::Zstd => Box::pin(ZstdDecoder::new(reader)),
@@ -56,7 +90,7 @@ impl OciImageLayer {
         Ok(reader)
     }
 
-    pub async fn archive(&self) -> Result<Archive<Pin<Box<dyn AsyncRead + Send>>>> {
+    pub async fn archive(&self) -> Result<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>> {
         let decompress = self.decompress().await?;
         Ok(Archive::new(decompress))
     }
@@ -66,13 +100,14 @@ impl OciImageLayer {
 pub struct OciResolvedImage {
     pub name: ImageName,
     pub digest: String,
-    pub manifest: ImageManifest,
+    pub descriptor: Descriptor,
+    pub manifest: OciSchema<ImageManifest>,
 }
 
 #[derive(Clone, Debug)]
 pub struct OciLocalImage {
     pub image: OciResolvedImage,
-    pub config: ImageConfiguration,
+    pub config: OciSchema<ImageConfiguration>,
     pub layers: Vec<OciImageLayer>,
 }
 
@@ -89,10 +124,10 @@ impl OciImageFetcher {
         }
     }
 
-    async fn load_seed_json_blob<T: DeserializeOwned>(
+    async fn load_seed_json_blob<T: Clone + Debug + DeserializeOwned>(
         &self,
         descriptor: &Descriptor,
-    ) -> Result<Option<T>> {
+    ) -> Result<Option<OciSchema<T>>> {
         let digest = descriptor.digest();
         let Some((digest_type, digest_content)) = digest.split_once(':') else {
             return Err(anyhow!("digest content was not properly formatted"));
@@ -101,7 +136,10 @@ impl OciImageFetcher {
         self.load_seed_json(&want).await
     }
 
-    async fn load_seed_json<T: DeserializeOwned>(&self, want: &str) -> Result<Option<T>> {
+    async fn load_seed_json<T: Clone + Debug + DeserializeOwned>(
+        &self,
+        want: &str,
+    ) -> Result<Option<OciSchema<T>>> {
         let Some(ref seed) = self.seed else {
             return Ok(None);
         };
@@ -113,10 +151,10 @@ impl OciImageFetcher {
             let mut entry = entry?;
             let path = String::from_utf8(entry.path_bytes().to_vec())?;
             if path == want {
-                let mut content = String::new();
-                entry.read_to_string(&mut content).await?;
-                let data = serde_json::from_str::<T>(&content)?;
-                return Ok(Some(data));
+                let mut content = Vec::new();
+                entry.read_to_end(&mut content).await?;
+                let item = serde_json::from_slice::<T>(&content)?;
+                return Ok(Some(OciSchema::new(content, item)));
             }
         }
         Ok(None)
@@ -154,7 +192,7 @@ impl OciImageFetcher {
 
         if let Some(index) = self.load_seed_json::<ImageIndex>("index.json").await? {
             let mut found: Option<&Descriptor> = None;
-            for manifest in index.manifests() {
+            for manifest in index.item().manifests() {
                 let Some(annotations) = manifest.annotations() else {
                     continue;
                 };
@@ -179,6 +217,13 @@ impl OciImageFetcher {
                         continue;
                     }
                 }
+
+                if let Some(ref digest) = image.digest {
+                    if digest != manifest.digest() {
+                        continue;
+                    }
+                }
+
                 found = Some(manifest);
                 break;
             }
@@ -192,6 +237,7 @@ impl OciImageFetcher {
                 );
                 return Ok(OciResolvedImage {
                     name: image,
+                    descriptor: found.clone(),
                     digest: found.digest().clone(),
                     manifest,
                 });
@@ -200,11 +246,20 @@ impl OciImageFetcher {
         }
 
         let mut client = OciRegistryClient::new(image.registry_url()?, self.platform.clone())?;
-        let (manifest, digest) = client
-            .get_manifest_with_digest(&image.name, &image.reference)
+        let (manifest, descriptor, digest) = client
+            .get_manifest_with_digest(&image.name, image.reference.as_ref(), image.digest.as_ref())
            .await?;
+        let descriptor = descriptor.unwrap_or_else(|| {
+            DescriptorBuilder::default()
+                .media_type(MediaType::ImageManifest)
+                .size(manifest.raw().len() as i64)
+                .digest(digest.clone())
+                .build()
+                .unwrap()
+        });
         Ok(OciResolvedImage {
             name: image,
+            descriptor,
             digest,
             manifest,
         })
@@ -212,41 +267,44 @@ impl OciImageFetcher {
 
     pub async fn download(
         &self,
-        image: OciResolvedImage,
+        image: &OciResolvedImage,
         layer_dir: &Path,
     ) -> Result<OciLocalImage> {
-        let config: ImageConfiguration;
+        let config: OciSchema<ImageConfiguration>;
         self.progress
             .update(|progress| {
-                progress.phase = OciProgressPhase::ConfigAcquire;
+                progress.phase = OciProgressPhase::ConfigDownload;
             })
             .await;
         let mut client = OciRegistryClient::new(image.name.registry_url()?, self.platform.clone())?;
         if let Some(seeded) = self
-            .load_seed_json_blob::<ImageConfiguration>(image.manifest.config())
+            .load_seed_json_blob::<ImageConfiguration>(image.manifest.item().config())
             .await?
         {
             config = seeded;
         } else {
             let config_bytes = client
-                .get_blob(&image.name.name, image.manifest.config())
+                .get_blob(&image.name.name, image.manifest.item().config())
                 .await?;
-            config = serde_json::from_slice(&config_bytes)?;
+            config = OciSchema::new(
+                config_bytes.to_vec(),
+                serde_json::from_slice(&config_bytes)?,
+            );
         }
         self.progress
             .update(|progress| {
-                progress.phase = OciProgressPhase::LayerAcquire;
+                progress.phase = OciProgressPhase::LayerDownload;
 
-                for layer in image.manifest.layers() {
-                    progress.add_layer(layer.digest(), layer.size() as usize);
+                for layer in image.manifest.item().layers() {
+                    progress.add_layer(layer.digest());
                 }
             })
             .await;
         let mut layers = Vec::new();
-        for layer in image.manifest.layers() {
+        for layer in image.manifest.item().layers() {
             self.progress
                 .update(|progress| {
-                    progress.downloading_layer(layer.digest(), 0, layer.size() as usize);
+                    progress.downloading_layer(layer.digest(), 0, layer.size() as u64);
                 })
                 .await;
             layers.push(
@@ -255,12 +313,12 @@ impl OciImageFetcher {
             );
             self.progress
                 .update(|progress| {
-                    progress.downloaded_layer(layer.digest());
+                    progress.downloaded_layer(layer.digest(), layer.size() as u64);
                 })
                 .await;
         }
         Ok(OciLocalImage {
-            image,
+            image: image.clone(),
             config,
             layers,
         })
@@ -294,6 +352,12 @@ impl OciImageFetcher {
             }
         }
 
+        let metadata = fs::metadata(&layer_path).await?;
+
+        if layer.size() as u64 != metadata.size() {
+            return Err(anyhow!("layer size differs from size in manifest",));
+        }
+
         let mut media_type = layer.media_type().clone();
 
         // docker layer compatibility
@@ -308,6 +372,7 @@ impl OciImageFetcher {
             other => return Err(anyhow!("found layer with unknown media type: {}", other)),
         };
         Ok(OciImageLayer {
+            metadata: layer.clone(),
             path: layer_path,
             digest: layer.digest().clone(),
             compression,
@@ -4,4 +4,5 @@ pub mod name;
 pub mod packer;
 pub mod progress;
 pub mod registry;
+pub mod schema;
 pub mod vfs;
@@ -2,27 +2,39 @@ use anyhow::Result;
 use std::fmt;
 use url::Url;
 
-const DOCKER_HUB_MIRROR: &str = "mirror.gcr.io";
-const DEFAULT_IMAGE_TAG: &str = "latest";
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct ImageName {
     pub hostname: String,
     pub port: Option<u16>,
     pub name: String,
-    pub reference: String,
+    pub reference: Option<String>,
+    pub digest: Option<String>,
 }
 
 impl fmt::Display for ImageName {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if let Some(port) = self.port {
-            write!(
-                f,
-                "{}:{}/{}:{}",
-                self.hostname, port, self.name, self.reference
-            )
+        let mut suffix = String::new();
+        if let Some(ref reference) = self.reference {
+            suffix.push(':');
+            suffix.push_str(reference);
+        }
+
+        if let Some(ref digest) = self.digest {
+            suffix.push('@');
+            suffix.push_str(digest);
+        }
+
+        if ImageName::DOCKER_HUB_MIRROR == self.hostname && self.port.is_none() {
+            if self.name.starts_with("library/") {
+                write!(f, "{}{}", &self.name[8..], suffix)
+            } else {
+                write!(f, "{}{}", self.name, suffix)
+            }
+        } else if let Some(port) = self.port {
+            write!(f, "{}:{}/{}{}", self.hostname, port, self.name, suffix)
         } else {
-            write!(f, "{}/{}:{}", self.hostname, self.name, self.reference)
+            write!(f, "{}/{}{}", self.hostname, self.name, suffix)
         }
     }
 }
@@ -35,13 +47,21 @@ impl Default for ImageName {
 }
 
 impl ImageName {
+    pub const DOCKER_HUB_MIRROR: &'static str = "registry.docker.io";
+    pub const DEFAULT_IMAGE_TAG: &'static str = "latest";
+
     pub fn parse(name: &str) -> Result<Self> {
         let full_name = name.to_string();
         let name = full_name.clone();
         let (mut hostname, mut name) = name
             .split_once('/')
             .map(|x| (x.0.to_string(), x.1.to_string()))
-            .unwrap_or_else(|| (DOCKER_HUB_MIRROR.to_string(), format!("library/{}", name)));
+            .unwrap_or_else(|| {
+                (
+                    ImageName::DOCKER_HUB_MIRROR.to_string(),
+                    format!("library/{}", name),
+                )
+            });
 
         // heuristic to find any docker hub image formats
         // that may be in the hostname format. for example:
@@ -49,7 +69,7 @@ impl ImageName {
         // and neither will abc/hello/xyz:latest
         if !hostname.contains('.') && full_name.chars().filter(|x| *x == '/').count() == 1 {
             name = format!("{}/{}", hostname, name);
-            hostname = DOCKER_HUB_MIRROR.to_string();
+            hostname = ImageName::DOCKER_HUB_MIRROR.to_string();
         }
 
         let (hostname, port) = if let Some((hostname, port)) = hostname
@@ -60,15 +80,54 @@ impl ImageName {
         } else {
             (hostname, None)
         };
-        let (name, reference) = name
-            .split_once(':')
-            .map(|x| (x.0.to_string(), x.1.to_string()))
-            .unwrap_or((name.to_string(), DEFAULT_IMAGE_TAG.to_string()));
+
+        let name_has_digest = if name.contains('@') {
+            let digest_start = name.chars().position(|c| c == '@');
+            let ref_start = name.chars().position(|c| c == ':');
+            if let (Some(digest_start), Some(ref_start)) = (digest_start, ref_start) {
+                digest_start < ref_start
+            } else {
+                true
+            }
+        } else {
+            false
+        };
+
+        let (name, digest) = if name_has_digest {
+            name.split_once('@')
+                .map(|(name, digest)| (name.to_string(), Some(digest.to_string())))
+                .unwrap_or_else(|| (name, None))
+        } else {
+            (name, None)
+        };
+
+        let (name, reference) = if name.contains(':') {
+            name.split_once(':')
+                .map(|(name, reference)| (name.to_string(), Some(reference.to_string())))
+                .unwrap_or((name, None))
+        } else {
+            (name, None)
+        };
+
+        let (reference, digest) = if let Some(reference) = reference {
+            if let Some(digest) = digest {
+                (Some(reference), Some(digest))
+            } else {
+                reference
+                    .split_once('@')
+                    .map(|(reff, digest)| (Some(reff.to_string()), Some(digest.to_string())))
+                    .unwrap_or_else(|| (Some(reference), None))
+            }
+        } else {
+            (None, digest)
+        };
+
         Ok(ImageName {
             hostname,
             port,
             name,
             reference,
+            digest,
         })
     }
 
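A hedged sketch of what the reworked parser above is expected to produce for tagged and digest-pinned names, following the logic in this change; the krataoci crate path is taken from the example program earlier in this diff, and the digest value below is a placeholder.

    use anyhow::Result;
    use krataoci::name::ImageName;

    fn main() -> Result<()> {
        // Tag only: the reference is kept and the digest stays empty.
        let tagged = ImageName::parse("alpine:3.19")?;
        assert_eq!(tagged.reference.as_deref(), Some("3.19"));
        assert_eq!(tagged.digest, None);

        // Digest pin without a tag: the digest field is populated instead.
        let pinned = ImageName::parse("alpine@sha256:abc123")?;
        assert_eq!(pinned.reference, None);
        assert_eq!(pinned.digest.as_deref(), Some("sha256:abc123"));

        // Tag and digest together: the digest is split off the end of the reference.
        let both = ImageName::parse("alpine:3.19@sha256:abc123")?;
        assert_eq!(both.reference.as_deref(), Some("3.19"));
        assert_eq!(both.digest.as_deref(), Some("sha256:abc123"));
        Ok(())
    }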
@@ -1,18 +1,22 @@
-use std::{path::Path, process::Stdio, sync::Arc};
+use std::{os::unix::fs::MetadataExt, path::Path, process::Stdio, sync::Arc};
 
 use super::OciPackedFormat;
-use crate::{
-    progress::{OciBoundProgress, OciProgressPhase},
-    vfs::VfsTree,
-};
+use crate::{progress::OciBoundProgress, vfs::VfsTree};
 use anyhow::{anyhow, Result};
 use log::warn;
-use tokio::{pin, process::Command, select};
+use tokio::{
+    fs::{self, File},
+    io::BufWriter,
+    pin,
+    process::{Child, Command},
+    select,
+};
 
 #[derive(Debug, Clone, Copy)]
 pub enum OciPackerBackendType {
     MkSquashfs,
     MkfsErofs,
+    Tar,
 }
 
 impl OciPackerBackendType {
@@ -20,6 +24,7 @@ impl OciPackerBackendType {
         match self {
             OciPackerBackendType::MkSquashfs => OciPackedFormat::Squashfs,
             OciPackerBackendType::MkfsErofs => OciPackedFormat::Erofs,
+            OciPackerBackendType::Tar => OciPackedFormat::Tar,
         }
     }
 
@@ -31,6 +36,7 @@ impl OciPackerBackendType {
             OciPackerBackendType::MkfsErofs => {
                 Box::new(OciPackerMkfsErofs {}) as Box<dyn OciPackerBackend>
             }
+            OciPackerBackendType::Tar => Box::new(OciPackerTar {}) as Box<dyn OciPackerBackend>,
         }
     }
 }
@@ -47,13 +53,11 @@ impl OciPackerBackend for OciPackerMkSquashfs {
     async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
         progress
             .update(|progress| {
-                progress.phase = OciProgressPhase::Packing;
-                progress.total = 1;
-                progress.value = 0;
+                progress.start_packing();
             })
             .await;
 
-        let mut child = Command::new("mksquashfs")
+        let child = Command::new("mksquashfs")
             .arg("-")
             .arg(file)
             .arg("-comp")
@@ -63,7 +67,9 @@ impl OciPackerBackend for OciPackerMkSquashfs {
             .stderr(Stdio::null())
             .stdout(Stdio::null())
             .spawn()?;
+        let mut child = ChildProcessKillGuard(child);
         let stdin = child
+            .0
             .stdin
             .take()
             .ok_or(anyhow!("unable to acquire stdin stream"))?;
@@ -74,7 +80,7 @@ impl OciPackerBackend for OciPackerMkSquashfs {
             }
             Ok(())
         }));
-        let wait = child.wait();
+        let wait = child.0.wait();
         pin!(wait);
         let status_result = loop {
             if let Some(inner) = writer.as_mut() {
@@ -110,12 +116,9 @@ impl OciPackerBackend for OciPackerMkSquashfs {
                 status.code().unwrap()
             ))
         } else {
+            let metadata = fs::metadata(&file).await?;
             progress
-                .update(|progress| {
-                    progress.phase = OciProgressPhase::Packing;
-                    progress.total = 1;
-                    progress.value = 1;
-                })
+                .update(|progress| progress.complete(metadata.size()))
                 .await;
             Ok(())
         }
@@ -126,32 +129,32 @@ pub struct OciPackerMkfsErofs {}
 
 #[async_trait::async_trait]
 impl OciPackerBackend for OciPackerMkfsErofs {
-    async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, path: &Path) -> Result<()> {
+    async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
         progress
             .update(|progress| {
-                progress.phase = OciProgressPhase::Packing;
-                progress.total = 1;
-                progress.value = 0;
+                progress.start_packing();
             })
             .await;
 
-        let mut child = Command::new("mkfs.erofs")
+        let child = Command::new("mkfs.erofs")
            .arg("-L")
            .arg("root")
            .arg("--tar=-")
-            .arg(path)
+            .arg(file)
            .stdin(Stdio::piped())
            .stderr(Stdio::null())
            .stdout(Stdio::null())
            .spawn()?;
+        let mut child = ChildProcessKillGuard(child);
         let stdin = child
+            .0
            .stdin
            .take()
            .ok_or(anyhow!("unable to acquire stdin stream"))?;
         let mut writer = Some(tokio::task::spawn(
             async move { vfs.write_to_tar(stdin).await },
         ));
-        let wait = child.wait();
+        let wait = child.0.wait();
         pin!(wait);
         let status_result = loop {
             if let Some(inner) = writer.as_mut() {
@@ -188,14 +191,46 @@ impl OciPackerBackend for OciPackerMkfsErofs {
                 status.code().unwrap()
             ))
         } else {
+            let metadata = fs::metadata(&file).await?;
             progress
                 .update(|progress| {
-                    progress.phase = OciProgressPhase::Packing;
-                    progress.total = 1;
-                    progress.value = 1;
+                    progress.complete(metadata.size());
                 })
                 .await;
             Ok(())
         }
     }
 }
+
+pub struct OciPackerTar {}
+
+#[async_trait::async_trait]
+impl OciPackerBackend for OciPackerTar {
+    async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
+        progress
+            .update(|progress| {
+                progress.start_packing();
+            })
+            .await;
+
+        let output = File::create(file).await?;
+        let output = BufWriter::new(output);
+        vfs.write_to_tar(output).await?;
+
+        let metadata = fs::metadata(file).await?;
+        progress
+            .update(|progress| {
+                progress.complete(metadata.size());
+            })
+            .await;
+        Ok(())
+    }
+}
+
+struct ChildProcessKillGuard(Child);
+
+impl Drop for ChildProcessKillGuard {
+    fn drop(&mut self) {
+        let _ = self.0.start_kill();
+    }
+}
@@ -1,66 +1,121 @@
-use crate::packer::{OciImagePacked, OciPackedFormat};
+use crate::{
+    name::ImageName,
+    packer::{OciPackedFormat, OciPackedImage},
+    schema::OciSchema,
+};
+
 use anyhow::Result;
-use log::debug;
-use oci_spec::image::{ImageConfiguration, ImageManifest};
-use std::path::{Path, PathBuf};
-use tokio::fs;
+use log::{debug, error};
+use oci_spec::image::{
+    Descriptor, ImageConfiguration, ImageIndex, ImageIndexBuilder, ImageManifest, MediaType,
+    ANNOTATION_REF_NAME,
+};
+use std::{
+    path::{Path, PathBuf},
+    sync::Arc,
+};
+use tokio::{fs, sync::RwLock};
 
 #[derive(Clone)]
 pub struct OciPackerCache {
     cache_dir: PathBuf,
+    index: Arc<RwLock<ImageIndex>>,
 }
 
+const ANNOTATION_IMAGE_NAME: &str = "io.containerd.image.name";
+const ANNOTATION_OCI_PACKER_FORMAT: &str = "dev.krata.oci.packer.format";
+
 impl OciPackerCache {
-    pub fn new(cache_dir: &Path) -> Result<OciPackerCache> {
-        Ok(OciPackerCache {
+    pub async fn new(cache_dir: &Path) -> Result<OciPackerCache> {
+        let index = ImageIndexBuilder::default()
+            .schema_version(2u32)
+            .media_type(MediaType::ImageIndex)
+            .manifests(Vec::new())
+            .build()?;
+        let cache = OciPackerCache {
             cache_dir: cache_dir.to_path_buf(),
-        })
+            index: Arc::new(RwLock::new(index)),
+        };
+
+        {
+            let mut mutex = cache.index.write().await;
+            *mutex = cache.load_index().await?;
+        }
+
+        Ok(cache)
+    }
+
+    pub async fn list(&self) -> Result<Vec<Descriptor>> {
+        let index = self.index.read().await;
+        Ok(index.manifests().clone())
     }
 
     pub async fn recall(
         &self,
+        name: ImageName,
         digest: &str,
         format: OciPackedFormat,
-    ) -> Result<Option<OciImagePacked>> {
+    ) -> Result<Option<OciPackedImage>> {
+        let index = self.index.read().await;
+
+        let mut descriptor: Option<Descriptor> = None;
+        for manifest in index.manifests() {
+            if manifest.digest() == digest
+                && manifest
+                    .annotations()
+                    .as_ref()
+                    .and_then(|x| x.get(ANNOTATION_OCI_PACKER_FORMAT))
+                    .map(|x| x.as_str())
+                    == Some(format.extension())
+            {
+                descriptor = Some(manifest.clone());
+                break;
+            }
+        }
+
+        let Some(descriptor) = descriptor else {
+            return Ok(None);
+        };
+
         let mut fs_path = self.cache_dir.clone();
         let mut config_path = self.cache_dir.clone();
         let mut manifest_path = self.cache_dir.clone();
         fs_path.push(format!("{}.{}", digest, format.extension()));
         manifest_path.push(format!("{}.manifest.json", digest));
         config_path.push(format!("{}.config.json", digest));
-        Ok(
-            if fs_path.exists() && manifest_path.exists() && config_path.exists() {
-                let image_metadata = fs::metadata(&fs_path).await?;
-                let manifest_metadata = fs::metadata(&manifest_path).await?;
-                let config_metadata = fs::metadata(&config_path).await?;
-                if image_metadata.is_file()
-                    && manifest_metadata.is_file()
-                    && config_metadata.is_file()
-                {
-                    let manifest_text = fs::read_to_string(&manifest_path).await?;
-                    let manifest: ImageManifest = serde_json::from_str(&manifest_text)?;
-                    let config_text = fs::read_to_string(&config_path).await?;
-                    let config: ImageConfiguration = serde_json::from_str(&config_text)?;
-                    debug!("cache hit digest={}", digest);
-                    Some(OciImagePacked::new(
-                        digest.to_string(),
-                        fs_path.clone(),
-                        format,
-                        config,
-                        manifest,
-                    ))
-                } else {
-                    None
-                }
-            } else {
-                debug!("cache miss digest={}", digest);
-                None
-            },
-        )
+        if fs_path.exists() && manifest_path.exists() && config_path.exists() {
+            let image_metadata = fs::metadata(&fs_path).await?;
+            let manifest_metadata = fs::metadata(&manifest_path).await?;
+            let config_metadata = fs::metadata(&config_path).await?;
+            if image_metadata.is_file() && manifest_metadata.is_file() && config_metadata.is_file()
+            {
+                let manifest_bytes = fs::read(&manifest_path).await?;
+                let manifest: ImageManifest = serde_json::from_slice(&manifest_bytes)?;
+                let config_bytes = fs::read(&config_path).await?;
+                let config: ImageConfiguration = serde_json::from_slice(&config_bytes)?;
+                debug!("cache hit digest={}", digest);
+                Ok(Some(OciPackedImage::new(
+                    name,
+                    digest.to_string(),
+                    fs_path.clone(),
+                    format,
+                    descriptor,
+                    OciSchema::new(config_bytes, config),
+                    OciSchema::new(manifest_bytes, manifest),
+                )))
+            } else {
+                Ok(None)
+            }
+        } else {
+            debug!("cache miss digest={}", digest);
+            Ok(None)
+        }
     }
 
-    pub async fn store(&self, packed: OciImagePacked) -> Result<OciImagePacked> {
+    pub async fn store(&self, packed: OciPackedImage) -> Result<OciPackedImage> {
+        let mut index = self.index.write().await;
+        let mut manifests = index.manifests().clone();
         debug!("cache store digest={}", packed.digest);
         let mut fs_path = self.cache_dir.clone();
         let mut manifest_path = self.cache_dir.clone();
@@ -68,17 +123,98 @@ impl OciPackerCache {
         fs_path.push(format!("{}.{}", packed.digest, packed.format.extension()));
         manifest_path.push(format!("{}.manifest.json", packed.digest));
         config_path.push(format!("{}.config.json", packed.digest));
-        fs::copy(&packed.path, &fs_path).await?;
-        let manifest_text = serde_json::to_string_pretty(&packed.manifest)?;
-        fs::write(&manifest_path, manifest_text).await?;
-        let config_text = serde_json::to_string_pretty(&packed.config)?;
-        fs::write(&config_path, config_text).await?;
-        Ok(OciImagePacked::new(
+        if fs::rename(&packed.path, &fs_path).await.is_err() {
+            fs::copy(&packed.path, &fs_path).await?;
+            fs::remove_file(&packed.path).await?;
+        }
+        fs::write(&config_path, packed.config.raw()).await?;
+        fs::write(&manifest_path, packed.manifest.raw()).await?;
+        manifests.retain(|item| {
+            if item.digest() != &packed.digest {
+                return true;
+            }
+
+            let Some(format) = item
+                .annotations()
+                .as_ref()
+                .and_then(|x| x.get(ANNOTATION_OCI_PACKER_FORMAT))
+                .map(|x| x.as_str())
+            else {
+                return true;
+            };
+
+            if format != packed.format.extension() {
+                return true;
+            }
+
+            false
+        });
+
+        let mut descriptor = packed.descriptor.clone();
+        let mut annotations = descriptor.annotations().clone().unwrap_or_default();
+        annotations.insert(
+            ANNOTATION_OCI_PACKER_FORMAT.to_string(),
+            packed.format.extension().to_string(),
+        );
+        let image_name = packed.name.to_string();
+        annotations.insert(ANNOTATION_IMAGE_NAME.to_string(), image_name);
+        let image_ref = packed.name.reference.clone();
+        if let Some(image_ref) = image_ref {
+            annotations.insert(ANNOTATION_REF_NAME.to_string(), image_ref);
+        }
+        descriptor.set_annotations(Some(annotations));
+        manifests.push(descriptor.clone());
+        index.set_manifests(manifests);
+        self.save_index(&index).await?;
+
+        let packed = OciPackedImage::new(
+            packed.name,
             packed.digest,
             fs_path.clone(),
             packed.format,
+            descriptor,
             packed.config,
             packed.manifest,
-        ))
+        );
+        Ok(packed)
+    }
+
+    async fn save_empty_index(&self) -> Result<ImageIndex> {
+        let index = ImageIndexBuilder::default()
+            .schema_version(2u32)
+            .media_type(MediaType::ImageIndex)
+            .manifests(Vec::new())
+            .build()?;
+        self.save_index(&index).await?;
+        Ok(index)
+    }
+
+    async fn load_index(&self) -> Result<ImageIndex> {
+        let mut index_path = self.cache_dir.clone();
+        index_path.push("index.json");
+
+        if !index_path.exists() {
+            self.save_empty_index().await?;
+        }
+
+        let content = fs::read_to_string(&index_path).await?;
+        let index = match serde_json::from_str::<ImageIndex>(&content) {
+            Ok(index) => index,
+            Err(error) => {
+                error!("image index was corrupted, creating a new one: {}", error);
+                self.save_empty_index().await?
+            }
+        };
+
+        Ok(index)
+    }
+
+    async fn save_index(&self, index: &ImageIndex) -> Result<()> {
+        let mut encoded = serde_json::to_string_pretty(index)?;
+        encoded.push('\n');
+        let mut index_path = self.cache_dir.clone();
+        index_path.push("index.json");
+        fs::write(&index_path, encoded).await?;
+        Ok(())
     }
 }
@@ -1,17 +1,20 @@
 use std::path::PathBuf;
 
+use crate::{name::ImageName, schema::OciSchema};
+
 use self::backend::OciPackerBackendType;
-use oci_spec::image::{ImageConfiguration, ImageManifest};
+use oci_spec::image::{Descriptor, ImageConfiguration, ImageManifest};
 
 pub mod backend;
 pub mod cache;
 pub mod service;
 
-#[derive(Debug, Default, Clone, Copy)]
+#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash)]
 pub enum OciPackedFormat {
     #[default]
     Squashfs,
     Erofs,
+    Tar,
 }
 
 impl OciPackedFormat {
@@ -19,6 +22,7 @@ impl OciPackedFormat {
         match self {
             OciPackedFormat::Squashfs => "squashfs",
             OciPackedFormat::Erofs => "erofs",
+            OciPackedFormat::Tar => "tar",
         }
     }
 
@@ -26,31 +30,38 @@ impl OciPackedFormat {
         match self {
             OciPackedFormat::Squashfs => OciPackerBackendType::MkSquashfs,
             OciPackedFormat::Erofs => OciPackerBackendType::MkfsErofs,
+            OciPackedFormat::Tar => OciPackerBackendType::Tar,
         }
     }
 }
 
 #[derive(Clone)]
-pub struct OciImagePacked {
+pub struct OciPackedImage {
+    pub name: ImageName,
     pub digest: String,
     pub path: PathBuf,
     pub format: OciPackedFormat,
-    pub config: ImageConfiguration,
-    pub manifest: ImageManifest,
+    pub descriptor: Descriptor,
+    pub config: OciSchema<ImageConfiguration>,
+    pub manifest: OciSchema<ImageManifest>,
 }
 
-impl OciImagePacked {
+impl OciPackedImage {
     pub fn new(
+        name: ImageName,
         digest: String,
         path: PathBuf,
         format: OciPackedFormat,
-        config: ImageConfiguration,
-        manifest: ImageManifest,
-    ) -> OciImagePacked {
-        OciImagePacked {
+        descriptor: Descriptor,
+        config: OciSchema<ImageConfiguration>,
+        manifest: OciSchema<ImageManifest>,
+    ) -> OciPackedImage {
+        OciPackedImage {
+            name,
             digest,
             path,
             format,
+            descriptor,
             config,
             manifest,
         }
@@ -1,58 +1,201 @@
-use std::path::{Path, PathBuf};
+use std::{
+    collections::{hash_map::Entry, HashMap},
+    fmt::Display,
+    path::{Path, PathBuf},
+    sync::Arc,
+};
 
 use anyhow::{anyhow, Result};
+use oci_spec::image::Descriptor;
+use tokio::{
+    sync::{watch, Mutex},
+    task::JoinHandle,
+};
 
 use crate::{
     assemble::OciImageAssembler,
-    fetch::OciImageFetcher,
+    fetch::{OciImageFetcher, OciResolvedImage},
     name::ImageName,
     progress::{OciBoundProgress, OciProgress, OciProgressContext},
     registry::OciPlatform,
 };
 
-use super::{cache::OciPackerCache, OciImagePacked, OciPackedFormat};
+use log::{error, info, warn};
+
+use super::{cache::OciPackerCache, OciPackedFormat, OciPackedImage};
+
+pub struct OciPackerTask {
+    progress: OciBoundProgress,
+    watch: watch::Sender<Option<Result<OciPackedImage>>>,
+    task: JoinHandle<()>,
+}
 
 #[derive(Clone)]
 pub struct OciPackerService {
     seed: Option<PathBuf>,
     platform: OciPlatform,
     cache: OciPackerCache,
+    tasks: Arc<Mutex<HashMap<OciPackerTaskKey, OciPackerTask>>>,
 }
 
 impl OciPackerService {
-    pub fn new(
+    pub async fn new(
         seed: Option<PathBuf>,
         cache_dir: &Path,
         platform: OciPlatform,
     ) -> Result<OciPackerService> {
         Ok(OciPackerService {
             seed,
-            cache: OciPackerCache::new(cache_dir)?,
+            cache: OciPackerCache::new(cache_dir).await?,
             platform,
+            tasks: Arc::new(Mutex::new(HashMap::new())),
         })
     }
 
+    pub async fn list(&self) -> Result<Vec<Descriptor>> {
+        self.cache.list().await
+    }
+
     pub async fn recall(
         &self,
         digest: &str,
         format: OciPackedFormat,
-    ) -> Result<Option<OciImagePacked>> {
-        self.cache.recall(digest, format).await
+    ) -> Result<Option<OciPackedImage>> {
+        if digest.contains('/') || digest.contains('\\') || digest.contains("..") {
+            return Ok(None);
+        }
+
+        self.cache
+            .recall(ImageName::parse("cached:latest")?, digest, format)
+            .await
     }
 
     pub async fn request(
         &self,
         name: ImageName,
         format: OciPackedFormat,
+        overwrite: bool,
         progress_context: OciProgressContext,
-    ) -> Result<OciImagePacked> {
+    ) -> Result<OciPackedImage> {
         let progress = OciProgress::new();
         let progress = OciBoundProgress::new(progress_context.clone(), progress);
         let fetcher =
             OciImageFetcher::new(self.seed.clone(), self.platform.clone(), progress.clone());
-        let resolved = fetcher.resolve(name).await?;
-        if let Some(cached) = self.cache.recall(&resolved.digest, format).await? {
-            return Ok(cached);
+        let resolved = fetcher.resolve(name.clone()).await?;
+        let key = OciPackerTaskKey {
+            digest: resolved.digest.clone(),
+            format,
+        };
+        let (progress_copy_task, mut receiver) = match self.tasks.lock().await.entry(key.clone()) {
+            Entry::Occupied(entry) => {
+                let entry = entry.get();
+                (
+                    Some(entry.progress.also_update(progress_context).await),
+                    entry.watch.subscribe(),
+                )
+            }
+
+            Entry::Vacant(entry) => {
+                let task = self
+                    .clone()
+                    .launch(
+                        name,
+                        key.clone(),
+                        format,
+                        overwrite,
+                        resolved,
+                        fetcher,
+                        progress.clone(),
+                    )
+                    .await;
+                let (watch, receiver) = watch::channel(None);
+
+                let task = OciPackerTask {
+                    progress: progress.clone(),
+                    task,
+                    watch,
+                };
+                entry.insert(task);
+                (None, receiver)
+            }
+        };
+
+        let _progress_task_guard = scopeguard::guard(progress_copy_task, |task| {
+            if let Some(task) = task {
+                task.abort();
+            }
+        });
+
+        let _task_cancel_guard = scopeguard::guard(self.clone(), |service| {
+            service.maybe_cancel_task(key);
+        });
+
+        loop {
+            receiver.changed().await?;
+            let current = receiver.borrow_and_update();
+            if current.is_some() {
+                return current
+                    .as_ref()
+                    .map(|x| x.as_ref().map_err(|err| anyhow!("{}", err)).cloned())
+                    .unwrap();
+            }
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    async fn launch(
+        self,
+        name: ImageName,
+        key: OciPackerTaskKey,
+        format: OciPackedFormat,
+        overwrite: bool,
+        resolved: OciResolvedImage,
+        fetcher: OciImageFetcher,
+        progress: OciBoundProgress,
+    ) -> JoinHandle<()> {
+        info!("started packer task {}", key);
+        tokio::task::spawn(async move {
+            let _task_drop_guard =
+                scopeguard::guard((key.clone(), self.clone()), |(key, service)| {
+                    service.ensure_task_gone(key);
+                });
+            if let Err(error) = self
+                .task(
+                    name,
+                    key.clone(),
+                    format,
+                    overwrite,
+                    resolved,
+                    fetcher,
+                    progress,
+                )
+                .await
+            {
+                self.finish(&key, Err(error)).await;
+            }
+        })
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    async fn task(
+        &self,
+        name: ImageName,
+        key: OciPackerTaskKey,
+        format: OciPackedFormat,
+        overwrite: bool,
+        resolved: OciResolvedImage,
+        fetcher: OciImageFetcher,
+        progress: OciBoundProgress,
+    ) -> Result<()> {
+        if !overwrite {
+            if let Some(cached) = self
+                .cache
+                .recall(name.clone(), &resolved.digest, format)
+                .await?
+            {
+                self.finish(&key, Ok(cached)).await;
+                return Ok(());
+            }
         }
         let assembler =
             OciImageAssembler::new(fetcher, resolved, progress.clone(), None, None).await?;
@@ -67,15 +210,69 @@ impl OciPackerService {
         packer
             .pack(progress, assembled.vfs.clone(), &target)
             .await?;
-        let packed = OciImagePacked::new(
+        let packed = OciPackedImage::new(
+            name,
             assembled.digest.clone(),
             file,
             format,
+            assembled.descriptor.clone(),
             assembled.config.clone(),
             assembled.manifest.clone(),
         );
         let packed = self.cache.store(packed).await?;
-        Ok(packed)
+        self.finish(&key, Ok(packed)).await;
+        Ok(())
+    }
+
+    async fn finish(&self, key: &OciPackerTaskKey, result: Result<OciPackedImage>) {
+        let Some(task) = self.tasks.lock().await.remove(key) else {
+            error!("packer task {} was not found when task completed", key);
+            return;
+        };
+
+        match result.as_ref() {
+            Ok(_) => {
+                info!("completed packer task {}", key);
+            }
+
+            Err(err) => {
+                warn!("packer task {} failed: {}", key, err);
+            }
+        }
+
+        task.watch.send_replace(Some(result));
+    }
+
+    fn maybe_cancel_task(self, key: OciPackerTaskKey) {
+        tokio::task::spawn(async move {
+            let tasks = self.tasks.lock().await;
+            if let Some(task) = tasks.get(&key) {
+                if task.watch.is_closed() {
+                    task.task.abort();
+                }
+            }
+        });
+    }
+
+    fn ensure_task_gone(self, key: OciPackerTaskKey) {
+        tokio::task::spawn(async move {
+            let mut tasks = self.tasks.lock().await;
+            if let Some(task) = tasks.remove(&key) {
+                warn!("aborted packer task {}", key);
+                task.watch.send_replace(Some(Err(anyhow!("task aborted"))));
+            }
+        });
+    }
+}
+
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+struct OciPackerTaskKey {
+    digest: String,
+    format: OciPackedFormat,
+}
+
+impl Display for OciPackerTaskKey {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_fmt(format_args!("{}:{}", self.digest, self.format.extension()))
     }
 }
@@ -1,14 +1,16 @@
-use std::sync::Arc;
-
 use indexmap::IndexMap;
-use tokio::sync::{mpsc::Sender, Mutex};
+use std::sync::Arc;
+use tokio::{
+    sync::{watch, Mutex},
+    task::JoinHandle,
+};
 
 #[derive(Clone, Debug)]
 pub struct OciProgress {
     pub phase: OciProgressPhase,
+    pub digest: Option<String>,
     pub layers: IndexMap<String, OciProgressLayer>,
-    pub value: u64,
-    pub total: u64,
+    pub indication: OciProgressIndication,
 }
 
 impl Default for OciProgress {
@@ -20,72 +22,146 @@ impl Default for OciProgress {
 impl OciProgress {
     pub fn new() -> Self {
         OciProgress {
-            phase: OciProgressPhase::Resolving,
+            phase: OciProgressPhase::Started,
+            digest: None,
             layers: IndexMap::new(),
-            value: 0,
-            total: 1,
+            indication: OciProgressIndication::Hidden,
         }
     }
 
-    pub fn add_layer(&mut self, id: &str, size: usize) {
+    pub fn start_resolving(&mut self) {
+        self.phase = OciProgressPhase::Resolving;
+        self.indication = OciProgressIndication::Spinner { message: None };
+    }
+
+    pub fn resolved(&mut self, digest: &str) {
+        self.digest = Some(digest.to_string());
+        self.indication = OciProgressIndication::Hidden;
+    }
+
+    pub fn add_layer(&mut self, id: &str) {
         self.layers.insert(
             id.to_string(),
             OciProgressLayer {
                 id: id.to_string(),
                 phase: OciProgressLayerPhase::Waiting,
-                value: 0,
-                total: size as u64,
+                indication: OciProgressIndication::Spinner { message: None },
             },
         );
     }
 
-    pub fn downloading_layer(&mut self, id: &str, downloaded: usize, total: usize) {
+    pub fn downloading_layer(&mut self, id: &str, downloaded: u64, total: u64) {
         if let Some(entry) = self.layers.get_mut(id) {
             entry.phase = OciProgressLayerPhase::Downloading;
-            entry.value = downloaded as u64;
-            entry.total = total as u64;
+            entry.indication = OciProgressIndication::ProgressBar {
+                message: None,
+                current: downloaded,
+                total,
+                bytes: true,
+            };
         }
     }
 
-    pub fn downloaded_layer(&mut self, id: &str) {
+    pub fn downloaded_layer(&mut self, id: &str, total: u64) {
         if let Some(entry) = self.layers.get_mut(id) {
             entry.phase = OciProgressLayerPhase::Downloaded;
-            entry.value = entry.total;
+            entry.indication = OciProgressIndication::Completed {
+                message: None,
+                total: Some(total),
+                bytes: true,
+            };
         }
     }
 
-    pub fn extracting_layer(&mut self, id: &str, extracted: usize, total: usize) {
+    pub fn start_assemble(&mut self) {
+        self.phase = OciProgressPhase::Assemble;
+        self.indication = OciProgressIndication::Hidden;
+    }
+
+    pub fn start_extracting_layer(&mut self, id: &str) {
         if let Some(entry) = self.layers.get_mut(id) {
             entry.phase = OciProgressLayerPhase::Extracting;
-            entry.value = extracted as u64;
-            entry.total = total as u64;
+            entry.indication = OciProgressIndication::Spinner { message: None };
         }
     }
 
-    pub fn extracted_layer(&mut self, id: &str) {
+    pub fn extracting_layer(&mut self, id: &str, file: &str) {
+        if let Some(entry) = self.layers.get_mut(id) {
+            entry.phase = OciProgressLayerPhase::Extracting;
+            entry.indication = OciProgressIndication::Spinner {
+                message: Some(file.to_string()),
+            };
+        }
+    }
+
+    pub fn extracted_layer(&mut self, id: &str, count: u64, total_size: u64) {
         if let Some(entry) = self.layers.get_mut(id) {
             entry.phase = OciProgressLayerPhase::Extracted;
-            entry.value = entry.total;
+            entry.indication = OciProgressIndication::Completed {
+                message: Some(format!("{} files", count)),
+                total: Some(total_size),
+                bytes: true,
+            };
+        }
+    }
+
+    pub fn start_packing(&mut self) {
+        self.phase = OciProgressPhase::Pack;
+        for layer in self.layers.values_mut() {
+            layer.indication = OciProgressIndication::Hidden;
+        }
+        self.indication = OciProgressIndication::Spinner { message: None };
+    }
+
+    pub fn complete(&mut self, size: u64) {
+        self.phase = OciProgressPhase::Complete;
+        self.indication = OciProgressIndication::Completed {
+            message: None,
+            total: Some(size),
+            bytes: true,
        }
    }
}
 
 #[derive(Clone, Debug)]
 pub enum OciProgressPhase {
+    Started,
     Resolving,
     Resolved,
-    ConfigAcquire,
-    LayerAcquire,
-    Packing,
+    ConfigDownload,
+    LayerDownload,
+    Assemble,
+    Pack,
     Complete,
 }
 
+#[derive(Clone, Debug)]
+pub enum OciProgressIndication {
+    Hidden,
+
+    ProgressBar {
+        message: Option<String>,
+        current: u64,
+        total: u64,
+        bytes: bool,
+    },
+
+    Spinner {
+        message: Option<String>,
+    },
+
+    Completed {
+        message: Option<String>,
+        total: Option<u64>,
+        bytes: bool,
+    },
+}
+
 #[derive(Clone, Debug)]
 pub struct OciProgressLayer {
     pub id: String,
     pub phase: OciProgressLayerPhase,
-    pub value: u64,
-    pub total: u64,
+    pub indication: OciProgressIndication,
 }
 
 #[derive(Clone, Debug)]
@@ -99,16 +175,25 @@ pub enum OciProgressLayerPhase {
 
 #[derive(Clone)]
 pub struct OciProgressContext {
-    sender: Sender<OciProgress>,
+    sender: watch::Sender<OciProgress>,
 }
 
 impl OciProgressContext {
-    pub fn new(sender: Sender<OciProgress>) -> OciProgressContext {
+    pub fn create() -> (OciProgressContext, watch::Receiver<OciProgress>) {
+        let (sender, receiver) = watch::channel(OciProgress::new());
+        (OciProgressContext::new(sender), receiver)
+    }
+
+    pub fn new(sender: watch::Sender<OciProgress>) -> OciProgressContext {
         OciProgressContext { sender }
     }
 
     pub fn update(&self, progress: &OciProgress) {
-        let _ = self.sender.try_send(progress.clone());
+        let _ = self.sender.send(progress.clone());
+    }
+
+    pub fn subscribe(&self) -> watch::Receiver<OciProgress> {
+        self.sender.subscribe()
     }
 }
 
@@ -137,4 +222,17 @@ impl OciBoundProgress {
         function(&mut progress);
         self.context.update(&progress);
     }
+
+    pub async fn also_update(&self, context: OciProgressContext) -> JoinHandle<()> {
+        let progress = self.instance.lock().await.clone();
+        context.update(&progress);
+        let mut receiver = self.context.subscribe();
+        tokio::task::spawn(async move {
+            while (receiver.changed().await).is_ok() {
+                context
+                    .sender
+                    .send_replace(receiver.borrow_and_update().clone());
+            }
+        })
+    }
 }
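The progress context above moves from an mpsc queue to a tokio watch channel, so every subscriber always observes the latest snapshot instead of a backlog of events. A minimal sketch of that pattern with plain tokio follows; the Progress struct and the values are illustrative, not the crate's types:

// Sketch of latest-value progress reporting with tokio::sync::watch.
use tokio::sync::watch;

#[derive(Clone, Debug)]
struct Progress {
    value: u64,
    total: u64,
}

#[tokio::main]
async fn main() {
    let (sender, mut receiver) = watch::channel(Progress { value: 0, total: 10 });

    let reporter = tokio::spawn(async move {
        while receiver.changed().await.is_ok() {
            let snapshot = receiver.borrow_and_update().clone();
            println!("progress: {}/{}", snapshot.value, snapshot.total);
        }
    });

    for value in 1..=10 {
        // send_replace never blocks and overwrites the previous snapshot,
        // so slow readers simply skip intermediate states.
        sender.send_replace(Progress { value, total: 10 });
        tokio::task::yield_now().await;
    }

    drop(sender); // closing the sender ends the reporter loop
    let _ = reporter.await;
}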
@@ -7,7 +7,7 @@ use reqwest::{Client, RequestBuilder, Response, StatusCode};
 use tokio::{fs::File, io::AsyncWriteExt};
 use url::Url;
 
-use crate::progress::OciBoundProgress;
+use crate::{name::ImageName, progress::OciBoundProgress, schema::OciSchema};
 
 #[derive(Clone, Debug)]
 pub struct OciPlatform {
@@ -149,24 +149,20 @@ impl OciRegistryClient {
         ))?;
         let mut response = self.call(self.agent.get(url.as_str())).await?;
         let mut size: u64 = 0;
-        let mut last_progress_size: u64 = 0;
         while let Some(chunk) = response.chunk().await? {
             dest.write_all(&chunk).await?;
             size += chunk.len() as u64;
 
-            if (size - last_progress_size) > (5 * 1024 * 1024) {
-                last_progress_size = size;
-                if let Some(ref progress) = progress {
-                    progress
-                        .update(|progress| {
-                            progress.downloading_layer(
-                                descriptor.digest(),
-                                size as usize,
-                                descriptor.size() as usize,
-                            );
-                        })
-                        .await;
-                }
+            if let Some(ref progress) = progress {
+                progress
+                    .update(|progress| {
+                        progress.downloading_layer(
+                            descriptor.digest(),
+                            size,
+                            descriptor.size() as u64,
+                        );
+                    })
+                    .await;
             }
         }
         Ok(size)
@@ -176,11 +172,11 @@ impl OciRegistryClient {
         &mut self,
         name: N,
         reference: R,
-    ) -> Result<(ImageManifest, String)> {
+    ) -> Result<(OciSchema<ImageManifest>, String)> {
         let url = self.url.join(&format!(
             "/v2/{}/manifests/{}",
             name.as_ref(),
-            reference.as_ref()
+            reference.as_ref(),
         ))?;
         let accept = format!(
             "{}, {}, {}, {}",
@@ -198,20 +194,28 @@ impl OciRegistryClient {
             .ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?
             .to_str()?
             .to_string();
-        let manifest = serde_json::from_str(&response.text().await?)?;
-        Ok((manifest, digest))
+        let bytes = response.bytes().await?;
+        let manifest = serde_json::from_slice(&bytes)?;
+        Ok((OciSchema::new(bytes.to_vec(), manifest), digest))
     }
 
     pub async fn get_manifest_with_digest<N: AsRef<str>, R: AsRef<str>>(
         &mut self,
         name: N,
-        reference: R,
-    ) -> Result<(ImageManifest, String)> {
-        let url = self.url.join(&format!(
-            "/v2/{}/manifests/{}",
-            name.as_ref(),
-            reference.as_ref()
-        ))?;
+        reference: Option<R>,
+        digest: Option<N>,
+    ) -> Result<(OciSchema<ImageManifest>, Option<Descriptor>, String)> {
+        let what = digest
+            .as_ref()
+            .map(|x| x.as_ref().to_string())
+            .unwrap_or_else(|| {
+                reference
+                    .map(|x| x.as_ref().to_string())
+                    .unwrap_or_else(|| ImageName::DEFAULT_IMAGE_TAG.to_string())
+            });
+        let url = self
+            .url
+            .join(&format!("/v2/{}/manifests/{}", name.as_ref(), what,))?;
         let accept = format!(
             "{}, {}, {}, {}",
             MediaType::ImageManifest.to_docker_v2s2()?,
@@ -234,18 +238,21 @@ impl OciRegistryClient {
             let descriptor = self
                 .pick_manifest(index)
                 .ok_or_else(|| anyhow!("unable to pick manifest from index"))?;
-            return self
+            let (manifest, digest) = self
                 .get_raw_manifest_with_digest(name, descriptor.digest())
-                .await;
+                .await?;
+            return Ok((manifest, Some(descriptor), digest));
         }
         let digest = response
             .headers()
             .get("Docker-Content-Digest")
-            .ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?
-            .to_str()?
-            .to_string();
-        let manifest = serde_json::from_str(&response.text().await?)?;
-        Ok((manifest, digest))
+            .and_then(|x| x.to_str().ok())
+            .map(|x| x.to_string())
+            .or_else(|| digest.map(|x: N| x.as_ref().to_string()))
+            .ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?;
+        let bytes = response.bytes().await?;
+        let manifest = serde_json::from_slice(&bytes)?;
+        Ok((OciSchema::new(bytes.to_vec(), manifest), None, digest))
     }
 
     fn pick_manifest(&mut self, index: ImageIndex) -> Option<Descriptor> {
29
crates/oci/src/schema.rs
Normal file
29
crates/oci/src/schema.rs
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
use std::fmt::Debug;
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct OciSchema<T: Clone + Debug> {
|
||||||
|
raw: Vec<u8>,
|
||||||
|
item: T,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Debug> OciSchema<T> {
|
||||||
|
pub fn new(raw: Vec<u8>, item: T) -> OciSchema<T> {
|
||||||
|
OciSchema { raw, item }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn raw(&self) -> &[u8] {
|
||||||
|
&self.raw
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn item(&self) -> &T {
|
||||||
|
&self.item
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn into_raw(self) -> Vec<u8> {
|
||||||
|
self.raw
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn into_item(self) -> T {
|
||||||
|
self.item
|
||||||
|
}
|
||||||
|
}
|
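The point of this wrapper is bit-perfect storage of config and manifest: re-encoding parsed JSON can reorder or reformat fields, which would change the content digest. A small sketch of that property, assuming the type is exported as krataoci::schema::OciSchema and that serde_json and anyhow are available:

// Sketch: the typed view and the original bytes stay paired.
use krataoci::schema::OciSchema;

fn main() -> anyhow::Result<()> {
    let raw = br#"{ "schemaVersion": 2,   "mediaType": "application/vnd.oci.image.manifest.v1+json" }"#.to_vec();
    let parsed: serde_json::Value = serde_json::from_slice(&raw)?;
    let schema = OciSchema::new(raw.clone(), parsed);

    // The parsed item is available for inspection...
    assert_eq!(schema.item()["schemaVersion"], 2);
    // ...while the raw bytes stay bit-perfect for hashing or writing to disk.
    assert_eq!(schema.raw(), raw.as_slice());
    // Re-serializing the parsed value would not round-trip the whitespace.
    assert_ne!(serde_json::to_vec(schema.item())?, raw);
    Ok(())
}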
@ -194,7 +194,7 @@ impl VfsTree {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<()> {
|
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<&VfsNode> {
|
||||||
let mut meta = VfsNode::from(entry)?;
|
let mut meta = VfsNode::from(entry)?;
|
||||||
let path = entry.path()?.to_path_buf();
|
let path = entry.path()?.to_path_buf();
|
||||||
let parent = if let Some(parent) = path.parent() {
|
let parent = if let Some(parent) = path.parent() {
|
||||||
@ -218,8 +218,11 @@ impl VfsTree {
|
|||||||
meta.children = old.children;
|
meta.children = old.children;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
parent.children.push(meta);
|
parent.children.push(meta.clone());
|
||||||
Ok(())
|
let Some(reference) = parent.children.iter().find(|child| child.name == meta.name) else {
|
||||||
|
return Err(anyhow!("unable to find inserted child in vfs"));
|
||||||
|
};
|
||||||
|
Ok(reference)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_disk_path(&mut self, path: &Path, disk_path: &Path) -> Result<()> {
|
pub fn set_disk_path(&mut self, path: &Path, disk_path: &Path) -> Result<()> {
|
||||||
|
@@ -12,18 +12,18 @@ resolver = "2"
 anyhow = { workspace = true }
 backhand = { workspace = true }
 ipnetwork = { workspace = true }
-krata = { path = "../krata", version = "^0.0.9" }
+krata = { path = "../krata", version = "^0.0.10" }
 krata-advmac = { workspace = true }
-krata-oci = { path = "../oci", version = "^0.0.9" }
+krata-oci = { path = "../oci", version = "^0.0.10" }
 log = { workspace = true }
 loopdev-3 = { workspace = true }
 serde_json = { workspace = true }
 tokio = { workspace = true }
 uuid = { workspace = true }
-krata-xenclient = { path = "../xen/xenclient", version = "^0.0.9" }
-krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.9" }
-krata-xengnt = { path = "../xen/xengnt", version = "^0.0.9" }
-krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }
+krata-xenclient = { path = "../xen/xenclient", version = "^0.0.10" }
+krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.10" }
+krata-xengnt = { path = "../xen/xengnt", version = "^0.0.10" }
+krata-xenstore = { path = "../xen/xenstore", version = "^0.0.10" }
 
 [lib]
 name = "kratart"
@@ -1,7 +1,7 @@
 use anyhow::Result;
 use backhand::{FilesystemWriter, NodeHeader};
 use krata::launchcfg::LaunchInfo;
-use krataoci::packer::OciImagePacked;
+use krataoci::packer::OciPackedImage;
 use log::trace;
 use std::fs;
 use std::fs::File;
@@ -9,13 +9,13 @@ use std::path::PathBuf;
 use uuid::Uuid;
 
 pub struct ConfigBlock<'a> {
-    pub image: &'a OciImagePacked,
+    pub image: &'a OciPackedImage,
     pub file: PathBuf,
     pub dir: PathBuf,
 }
 
 impl ConfigBlock<'_> {
-    pub fn new<'a>(uuid: &Uuid, image: &'a OciImagePacked) -> Result<ConfigBlock<'a>> {
+    pub fn new<'a>(uuid: &Uuid, image: &'a OciPackedImage) -> Result<ConfigBlock<'a>> {
         let mut dir = std::env::temp_dir().clone();
         dir.push(format!("krata-cfg-{}", uuid));
         fs::create_dir_all(&dir)?;
@@ -26,7 +26,7 @@ impl ConfigBlock<'_> {
 
     pub fn build(&self, launch_config: &LaunchInfo) -> Result<()> {
         trace!("build launch_config={:?}", launch_config);
-        let manifest = self.image.config.to_string()?;
+        let config = self.image.config.raw();
         let launch = serde_json::to_string(launch_config)?;
         let mut writer = FilesystemWriter::default();
         writer.push_dir(
@@ -39,7 +39,7 @@ impl ConfigBlock<'_> {
             },
         )?;
         writer.push_file(
-            manifest.as_bytes(),
+            config,
             "/image/config.json",
             NodeHeader {
                 permissions: 384,
@@ -10,7 +10,7 @@ use krata::launchcfg::{
     LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
     LaunchPackedFormat, LaunchRoot,
 };
-use krataoci::packer::OciImagePacked;
+use krataoci::packer::OciPackedImage;
 use tokio::sync::Semaphore;
 use uuid::Uuid;
 use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
@@ -23,6 +23,8 @@ use super::{GuestInfo, GuestState};
 
 pub struct GuestLaunchRequest {
     pub format: LaunchPackedFormat,
+    pub kernel: Vec<u8>,
+    pub initrd: Vec<u8>,
     pub uuid: Option<Uuid>,
     pub name: Option<String>,
     pub vcpus: u32,
@@ -30,7 +32,7 @@ pub struct GuestLaunchRequest {
     pub env: HashMap<String, String>,
     pub run: Option<Vec<String>>,
     pub debug: bool,
-    pub image: OciImagePacked,
+    pub image: OciPackedImage,
 }
 
 pub struct GuestLauncher {
@@ -173,22 +175,22 @@ impl GuestLauncher {
 
         let config = DomainConfig {
             backend_domid: 0,
-            name: &xen_name,
+            name: xen_name,
             max_vcpus: request.vcpus,
             mem_mb: request.mem,
-            kernel_path: &context.kernel,
-            initrd_path: &context.initrd,
-            cmdline: &cmdline,
-            use_console_backend: Some("krata-console"),
+            kernel: request.kernel,
+            initrd: request.initrd,
+            cmdline,
+            use_console_backend: Some("krata-console".to_string()),
             disks: vec![
                 DomainDisk {
-                    vdev: "xvda",
-                    block: &image_squashfs_loop,
+                    vdev: "xvda".to_string(),
+                    block: image_squashfs_loop.clone(),
                     writable: false,
                 },
                 DomainDisk {
-                    vdev: "xvdb",
-                    block: &cfgblk_squashfs_loop,
+                    vdev: "xvdb".to_string(),
+                    block: cfgblk_squashfs_loop.clone(),
                     writable: false,
                 },
             ],
@@ -197,7 +199,7 @@ impl GuestLauncher {
                 initialized: false,
             }],
             vifs: vec![DomainNetworkInterface {
-                mac: &guest_mac_string,
+                mac: guest_mac_string.clone(),
                 mtu: 1500,
                 bridge: None,
                 script: None,
@@ -1,9 +1,4 @@
-use std::{
-    fs,
-    path::{Path, PathBuf},
-    str::FromStr,
-    sync::Arc,
-};
+use std::{fs, path::PathBuf, str::FromStr, sync::Arc};
 
 use anyhow::{anyhow, Result};
 use ipnetwork::IpNetwork;
@@ -52,43 +47,17 @@ pub struct GuestInfo {
 pub struct RuntimeContext {
     pub autoloop: AutoLoop,
     pub xen: XenClient,
-    pub kernel: String,
-    pub initrd: String,
 }
 
 impl RuntimeContext {
-    pub async fn new(store: String) -> Result<Self> {
-        let mut image_cache_path = PathBuf::from(&store);
-        image_cache_path.push("cache");
-        fs::create_dir_all(&image_cache_path)?;
-
+    pub async fn new() -> Result<Self> {
         let xen = XenClient::open(0).await?;
-        image_cache_path.push("image");
-        fs::create_dir_all(&image_cache_path)?;
-        let kernel = RuntimeContext::detect_guest_file(&store, "kernel")?;
-        let initrd = RuntimeContext::detect_guest_file(&store, "initrd")?;
-
         Ok(RuntimeContext {
             autoloop: AutoLoop::new(LoopControl::open()?),
             xen,
-            kernel,
-            initrd,
         })
     }
 
-    fn detect_guest_file(store: &str, name: &str) -> Result<String> {
-        let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
-        if path.is_file() {
-            return path_as_string(&path);
-        }
-
-        path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
-        if path.is_file() {
-            return path_as_string(&path);
-        }
-        Err(anyhow!("unable to find required guest file: {}", name))
-    }
-
     pub async fn list(&self) -> Result<Vec<GuestInfo>> {
         let mut guests: Vec<GuestInfo> = Vec::new();
         for domid_candidate in self.xen.store.list("/local/domain").await? {
@@ -248,16 +217,14 @@ impl RuntimeContext {
 
 #[derive(Clone)]
 pub struct Runtime {
-    store: Arc<String>,
     context: RuntimeContext,
     launch_semaphore: Arc<Semaphore>,
 }
 
 impl Runtime {
-    pub async fn new(store: String) -> Result<Self> {
-        let context = RuntimeContext::new(store.clone()).await?;
+    pub async fn new() -> Result<Self> {
+        let context = RuntimeContext::new().await?;
         Ok(Self {
-            store: Arc::new(store),
             context,
             launch_semaphore: Arc::new(Semaphore::new(1)),
         })
@@ -320,12 +287,6 @@ impl Runtime {
     }
 
     pub async fn dupe(&self) -> Result<Runtime> {
-        Runtime::new((*self.store).clone()).await
+        Runtime::new().await
     }
 }
-
-fn path_as_string(path: &Path) -> Result<String> {
-    path.to_str()
-        .ok_or_else(|| anyhow!("unable to convert path to string"))
-        .map(|x| x.to_string())
-}
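The net effect of this hunk is that the runtime no longer threads a store path around: kernel/initrd detection has moved out of `RuntimeContext`, so both `RuntimeContext::new()` and `Runtime::new()` are now argument-free. A minimal call-site sketch; the import path is an assumption, since the diff only shows the type definitions:

```rust
use anyhow::Result;
// Hypothetical import path: whatever module exports the `Runtime` type above.
use runtime::Runtime;

async fn start() -> Result<Runtime> {
    // No store path argument anymore; guest kernel/initrd bytes are supplied
    // elsewhere rather than detected from `{store}/guest/...`.
    let runtime = Runtime::new().await?;
    // `dupe()` now just calls `Runtime::new()` again with no stored state.
    let _copy = runtime.dupe().await?;
    Ok(runtime)
}
```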
@@ -14,8 +14,8 @@ elf = { workspace = true }
 flate2 = { workspace = true }
 libc = { workspace = true }
 log = { workspace = true }
-krata-xencall = { path = "../xencall", version = "^0.0.9" }
-krata-xenstore = { path = "../xenstore", version = "^0.0.9" }
+krata-xencall = { path = "../xencall", version = "^0.0.10" }
+krata-xenstore = { path = "../xenstore", version = "^0.0.10" }
 memchr = { workspace = true }
 nix = { workspace = true }
 slice-copy = { workspace = true }
@@ -1,4 +1,5 @@
 use std::{env, process};
+use tokio::fs;
 use xenclient::error::Result;
 use xenclient::{DomainConfig, XenClient};
 
@@ -16,12 +17,12 @@ async fn main() -> Result<()> {
     let client = XenClient::open(0).await?;
     let config = DomainConfig {
         backend_domid: 0,
-        name: "xenclient-test",
+        name: "xenclient-test".to_string(),
         max_vcpus: 1,
         mem_mb: 512,
-        kernel_path: kernel_image_path.as_str(),
-        initrd_path: initrd_path.as_str(),
-        cmdline: "debug elevator=noop",
+        kernel: fs::read(&kernel_image_path).await?,
+        initrd: fs::read(&initrd_path).await?,
+        cmdline: "debug elevator=noop".to_string(),
         use_console_backend: None,
         disks: vec![],
         channels: vec![],
@@ -107,17 +107,15 @@ impl ElfImageLoader {
         ElfImageLoader::load_xz(file.as_slice())
     }
 
-    pub fn load_file_kernel(path: &str) -> Result<ElfImageLoader> {
-        let file = std::fs::read(path)?;
-
-        for start in find_iter(file.as_slice(), &[0x1f, 0x8b]) {
-            if let Ok(elf) = ElfImageLoader::load_gz(&file[start..]) {
+    pub fn load_file_kernel(data: &[u8]) -> Result<ElfImageLoader> {
+        for start in find_iter(data, &[0x1f, 0x8b]) {
+            if let Ok(elf) = ElfImageLoader::load_gz(&data[start..]) {
                 return Ok(elf);
             }
         }
 
-        for start in find_iter(file.as_slice(), &[0xfd, 0x37, 0x7a, 0x58]) {
-            if let Ok(elf) = ElfImageLoader::load_xz(&file[start..]) {
+        for start in find_iter(data, &[0xfd, 0x37, 0x7a, 0x58]) {
+            if let Ok(elf) = ElfImageLoader::load_xz(&data[start..]) {
                 return Ok(elf);
             }
         }
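`load_file_kernel` now scans a caller-provided byte slice for a gzip (`1f 8b`) or xz (`fd 37 7a 58`) payload instead of reading a path itself. A minimal sketch of the new call pattern, assuming the loader lives in an `elfloader` module (the diff does not show its path); reading from disk here is only one way to obtain the bytes:

```rust
// Module path for ElfImageLoader is an assumption based on this diff.
use xenclient::elfloader::ElfImageLoader;
use xenclient::error::Result;

async fn load_kernel(path: &str) -> Result<ElfImageLoader> {
    // The caller now owns the I/O; load_file_kernel only ever sees bytes,
    // which could just as well come from any in-memory source.
    let data = tokio::fs::read(path).await?;
    ElfImageLoader::load_file_kernel(&data)
}
```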
@@ -23,7 +23,6 @@ use boot::BootState;
 use log::{debug, trace, warn};
 use tokio::time::timeout;
 
-use std::fs::read;
 use std::path::PathBuf;
 use std::str::FromStr;
 use std::time::Duration;
@@ -40,60 +39,60 @@ pub struct XenClient {
     call: XenCall,
 }
 
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct BlockDeviceRef {
     pub path: String,
     pub major: u32,
     pub minor: u32,
 }
 
-#[derive(Debug)]
-pub struct DomainDisk<'a> {
-    pub vdev: &'a str,
-    pub block: &'a BlockDeviceRef,
+#[derive(Clone, Debug)]
+pub struct DomainDisk {
+    pub vdev: String,
+    pub block: BlockDeviceRef,
     pub writable: bool,
 }
 
-#[derive(Debug)]
-pub struct DomainFilesystem<'a> {
-    pub path: &'a str,
-    pub tag: &'a str,
+#[derive(Clone, Debug)]
+pub struct DomainFilesystem {
+    pub path: String,
+    pub tag: String,
 }
 
-#[derive(Debug)]
-pub struct DomainNetworkInterface<'a> {
-    pub mac: &'a str,
+#[derive(Clone, Debug)]
+pub struct DomainNetworkInterface {
+    pub mac: String,
     pub mtu: u32,
-    pub bridge: Option<&'a str>,
-    pub script: Option<&'a str>,
+    pub bridge: Option<String>,
+    pub script: Option<String>,
 }
 
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct DomainChannel {
     pub typ: String,
     pub initialized: bool,
 }
 
-#[derive(Debug)]
-pub struct DomainEventChannel<'a> {
-    pub name: &'a str,
+#[derive(Clone, Debug)]
+pub struct DomainEventChannel {
+    pub name: String,
 }
 
-#[derive(Debug)]
-pub struct DomainConfig<'a> {
+#[derive(Clone, Debug)]
+pub struct DomainConfig {
     pub backend_domid: u32,
-    pub name: &'a str,
+    pub name: String,
     pub max_vcpus: u32,
     pub mem_mb: u64,
-    pub kernel_path: &'a str,
-    pub initrd_path: &'a str,
-    pub cmdline: &'a str,
-    pub disks: Vec<DomainDisk<'a>>,
-    pub use_console_backend: Option<&'a str>,
+    pub kernel: Vec<u8>,
+    pub initrd: Vec<u8>,
+    pub cmdline: String,
+    pub disks: Vec<DomainDisk>,
+    pub use_console_backend: Option<String>,
     pub channels: Vec<DomainChannel>,
-    pub vifs: Vec<DomainNetworkInterface<'a>>,
-    pub filesystems: Vec<DomainFilesystem<'a>>,
-    pub event_channels: Vec<DomainEventChannel<'a>>,
+    pub vifs: Vec<DomainNetworkInterface>,
+    pub filesystems: Vec<DomainFilesystem>,
+    pub event_channels: Vec<DomainEventChannel>,
     pub extra_keys: Vec<(String, String)>,
     pub extra_rw_paths: Vec<String>,
 }
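With the lifetime parameters gone, `DomainConfig` and the per-device structs own their strings and the kernel/initrd bytes outright, which is what allows `#[derive(Clone)]` and removes the need for paths on disk. A minimal construction sketch against the new definition (all field values are placeholders):

```rust
use xenclient::DomainConfig;

fn example_config(kernel: Vec<u8>, initrd: Vec<u8>) -> DomainConfig {
    DomainConfig {
        backend_domid: 0,
        name: "example-guest".to_string(),
        max_vcpus: 1,
        mem_mb: 512,
        kernel, // raw kernel image bytes, no path on disk required
        initrd, // raw initrd bytes
        cmdline: "console=hvc0".to_string(),
        disks: vec![],
        use_console_backend: None,
        channels: vec![],
        vifs: vec![],
        filesystems: vec![],
        event_channels: vec![],
        extra_keys: vec![],
        extra_rw_paths: vec![],
    }
}
```

Compare the xenclient-test example earlier in this diff, which now reads the kernel and initrd with `tokio::fs::read` and moves the bytes into the config.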
@@ -117,7 +116,7 @@ impl XenClient {
         Ok(XenClient { store, call })
     }
 
-    pub async fn create(&self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
+    pub async fn create(&self, config: &DomainConfig) -> Result<CreatedDomain> {
         let mut domain = CreateDomain {
             max_vcpus: config.max_vcpus,
             ..Default::default()
@@ -143,7 +142,7 @@ impl XenClient {
         &self,
         domid: u32,
         domain: &CreateDomain,
-        config: &DomainConfig<'_>,
+        config: &DomainConfig,
     ) -> Result<CreatedDomain> {
         trace!(
             "XenClient init domid={} domain={:?} config={:?}",
@@ -237,9 +236,9 @@ impl XenClient {
             &Uuid::from_bytes(domain.handle).to_string(),
         )
         .await?;
-        tx.write_string(format!("{}/name", dom_path).as_str(), config.name)
+        tx.write_string(format!("{}/name", dom_path).as_str(), &config.name)
             .await?;
-        tx.write_string(format!("{}/name", vm_path).as_str(), config.name)
+        tx.write_string(format!("{}/name", vm_path).as_str(), &config.name)
             .await?;
 
         for (key, value) in &config.extra_keys {
@@ -257,7 +256,7 @@ impl XenClient {
 
         self.call.set_max_vcpus(domid, config.max_vcpus).await?;
         self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
-        let image_loader = ElfImageLoader::load_file_kernel(config.kernel_path)?;
+        let image_loader = ElfImageLoader::load_file_kernel(&config.kernel)?;
 
         let xenstore_evtchn: u32;
         let xenstore_mfn: u64;
@@ -270,18 +269,17 @@ impl XenClient {
         let mut arch = Box::new(X86BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
         #[cfg(target_arch = "aarch64")]
         let mut arch = Box::new(Arm64BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
-        let initrd = read(config.initrd_path)?;
         state = boot
             .initialize(
                 &mut arch,
                 &image_loader,
-                initrd.as_slice(),
+                &config.initrd,
                 config.max_vcpus,
                 config.mem_mb,
                 1,
             )
             .await?;
-        boot.boot(&mut arch, &mut state, config.cmdline).await?;
+        boot.boot(&mut arch, &mut state, &config.cmdline).await?;
         xenstore_evtchn = state.store_evtchn;
         xenstore_mfn = boot.phys.p2m[state.xenstore_segment.pfn as usize];
         p2m = boot.phys.p2m;
@@ -291,19 +289,9 @@ impl XenClient {
         let tx = self.store.transaction().await?;
         tx.write_string(format!("{}/image/os_type", vm_path).as_str(), "linux")
             .await?;
-        tx.write_string(
-            format!("{}/image/kernel", vm_path).as_str(),
-            config.kernel_path,
-        )
-        .await?;
-        tx.write_string(
-            format!("{}/image/ramdisk", vm_path).as_str(),
-            config.initrd_path,
-        )
-        .await?;
         tx.write_string(
             format!("{}/image/cmdline", vm_path).as_str(),
-            config.cmdline,
+            &config.cmdline,
         )
         .await?;
 
@@ -352,7 +340,8 @@ impl XenClient {
                 &DomainChannel {
                     typ: config
                         .use_console_backend
-                        .unwrap_or("xenconsoled")
+                        .clone()
+                        .unwrap_or("xenconsoled".to_string())
                         .to_string(),
                     initialized: true,
                 },
@@ -429,7 +418,7 @@ impl XenClient {
             .await?;
         let channel_path = format!("{}/evtchn/{}", dom_path, channel.name);
         self.store
-            .write_string(&format!("{}/name", channel_path), channel.name)
+            .write_string(&format!("{}/name", channel_path), &channel.name)
             .await?;
         self.store
             .write_string(&format!("{}/channel", channel_path), &id.to_string())
@@ -447,7 +436,7 @@ impl XenClient {
         backend_domid: u32,
         domid: u32,
         index: usize,
-        disk: &DomainDisk<'_>,
+        disk: &DomainDisk,
     ) -> Result<()> {
         let id = (202 << 8) | (index << 4) as u64;
         let backend_items: Vec<(&str, String)> = vec![
@@ -567,7 +556,7 @@ impl XenClient {
         backend_domid: u32,
         domid: u32,
         index: usize,
-        filesystem: &DomainFilesystem<'_>,
+        filesystem: &DomainFilesystem,
     ) -> Result<()> {
         let id = 90 + index as u64;
         let backend_items: Vec<(&str, String)> = vec![
@@ -605,7 +594,7 @@ impl XenClient {
         backend_domid: u32,
         domid: u32,
         index: usize,
-        vif: &DomainNetworkInterface<'_>,
+        vif: &DomainNetworkInterface,
     ) -> Result<()> {
         let id = 20 + index as u64;
         let mut backend_items: Vec<(&str, String)> = vec![
@@ -619,12 +608,12 @@ impl XenClient {
         ];
 
         if vif.bridge.is_some() {
-            backend_items.extend_from_slice(&[("bridge", vif.bridge.unwrap().to_string())]);
+            backend_items.extend_from_slice(&[("bridge", vif.bridge.clone().unwrap())]);
         }
 
         if vif.script.is_some() {
             backend_items.extend_from_slice(&[
-                ("script", vif.script.unwrap().to_string()),
+                ("script", vif.script.clone().unwrap()),
                 ("hotplug-status", "".to_string()),
             ]);
         } else {