Compare commits


78 Commits

Author SHA1 Message Date
4175e1e3fe chore: release (#181)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-06-24 05:01:32 +00:00
1bdf3bda87 build(deps): bump regex from 1.10.4 to 1.10.5 (#187)
Bumps [regex](https://github.com/rust-lang/regex) from 1.10.4 to 1.10.5.
- [Release notes](https://github.com/rust-lang/regex/releases)
- [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/regex/compare/1.10.4...1.10.5)

---
updated-dependencies:
- dependency-name: regex
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:58:15 +00:00
9a45d754bf chore(xenplatform): elf loader should async load the file (#197)
* fix(build): remove unused environment variables

* chore(xenplatform): elf loader should async load the file
2024-06-23 12:57:01 +00:00
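The change above swaps the ELF loader over to asynchronous file loading. As a rough illustration only (the function name below is hypothetical and not krata's actual loader API), reading an image with tokio's async filesystem helpers looks like this:

```rust
use anyhow::Result;
use tokio::fs;

// Hypothetical helper: load an ELF image without blocking the async runtime.
// tokio::fs::read performs the blocking read on a worker thread, so the
// executor stays responsive while a potentially large kernel image is loaded.
async fn load_elf_image(path: &str) -> Result<Vec<u8>> {
    let bytes = fs::read(path).await?;
    Ok(bytes)
}
```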
6c3fc54688 build(deps): bump url from 2.5.0 to 2.5.2 (#193)
Bumps [url](https://github.com/servo/rust-url) from 2.5.0 to 2.5.2.
- [Release notes](https://github.com/servo/rust-url/releases)
- [Commits](https://github.com/servo/rust-url/compare/v2.5.0...v2.5.2)

---
updated-dependencies:
- dependency-name: url
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:15:20 +00:00
af6a1a3ad2 build(deps): bump MarcoIeni/release-plz-action from 0.5.61 to 0.5.62 (#192)
Bumps [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action) from 0.5.61 to 0.5.62.
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](7566221bba...86afd21a7b)

---
updated-dependencies:
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:15:17 +00:00
7bef74fadf build(deps): bump actions/checkout from 4.1.6 to 4.1.7 (#190)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.1.7.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](a5ac7e51b4...692973e3d9)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:15:15 +00:00
ec1b6d4370 build(deps): bump clap from 4.5.4 to 4.5.7 (#188)
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.4 to 4.5.7.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.4...v4.5.7)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:15:10 +00:00
b2d146713b build(deps): bump redb from 2.1.0 to 2.1.1 (#186)
Bumps [redb](https://github.com/cberner/redb) from 2.1.0 to 2.1.1.
- [Release notes](https://github.com/cberner/redb/releases)
- [Changelog](https://github.com/cberner/redb/blob/master/CHANGELOG.md)
- [Commits](https://github.com/cberner/redb/compare/v2.1.0...v2.1.1)

---
updated-dependencies:
- dependency-name: redb
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:15:04 +00:00
b730b08d6e build(deps): bump step-security/harden-runner from 2.8.0 to 2.8.1 (#185)
Bumps [step-security/harden-runner](https://github.com/step-security/harden-runner) from 2.8.0 to 2.8.1.
- [Release notes](https://github.com/step-security/harden-runner/releases)
- [Commits](f086349bfa...17d0e2bd7d)

---
updated-dependencies:
- dependency-name: step-security/harden-runner
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:15:02 +00:00
87758d7ae9 build(deps): bump tokio-tun from 0.11.4 to 0.11.5 (#183)
Bumps [tokio-tun](https://github.com/yaa110/tokio-tun) from 0.11.4 to 0.11.5.
- [Commits](https://github.com/yaa110/tokio-tun/compare/0.11.4...0.11.5)

---
updated-dependencies:
- dependency-name: tokio-tun
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-23 12:14:59 +00:00
349664abf1 chore(git): delete old gitmodules 2024-06-23 01:50:35 -07:00
ef068e790c chore(xen): move device creation into transaction interface (#196)
* chore(xen): move domain creation to xenplatform

* chore(xen): move device transactions into separate interface
2024-06-21 17:38:19 +00:00
6f39f115b7 chore(xen): split platform support into separate crate (#195) 2024-06-21 08:10:45 +00:00
23c7302c04 docs: first pass of krata as an isolation engine 2024-06-20 19:57:18 -07:00
e219f3adf1 feature(xen): dynamic platform architecture (#194)
* wip hvm

* feat: move platform stuff all into its own thing

* hvm work

* more hvm work

* more hvm work

* feat: rework to support multiple platforms

* hvm nonredist

* more hvm work

* more hvm work

* pvh work

* work on loading cmdline

* implement initrd loading for pvh

* partially working pvh support

* fix merge issues

* pvh works!

* swap over to pv support

* remove old kernel stuff

* fix support for pv

* pvh is gone for now

* fix(runtime): debug should be respected

* fix(xen): arm64 is currently unsupported, treat it as such at runtime

* fix(examples): use architecture cfg for boot example

* fix(x86): use IOMMU only when needed for passthrough

* chore(build): print kernel architecture during fetch
2024-06-21 02:42:45 +00:00
2c7210d85e build(deps): bump prost-build from 0.12.4 to 0.12.6 (#170)
Bumps [prost-build](https://github.com/tokio-rs/prost) from 0.12.4 to 0.12.6.
- [Release notes](https://github.com/tokio-rs/prost/releases)
- [Commits](https://github.com/tokio-rs/prost/compare/v0.12.4...v0.12.6)

---
updated-dependencies:
- dependency-name: prost-build
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 19:24:00 +00:00
ade37e92f3 build(deps): bump serde from 1.0.202 to 1.0.203 (#172)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.202 to 1.0.203.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.202...v1.0.203)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 19:23:52 +00:00
ef3bc83069 build(deps): bump async-compression from 0.4.10 to 0.4.11 (#175)
Bumps [async-compression](https://github.com/Nullus157/async-compression) from 0.4.10 to 0.4.11.
- [Release notes](https://github.com/Nullus157/async-compression/releases)
- [Changelog](https://github.com/Nullus157/async-compression/blob/main/CHANGELOG.md)
- [Commits](https://github.com/Nullus157/async-compression/compare/v0.4.10...v0.4.11)

---
updated-dependencies:
- dependency-name: async-compression
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 19:23:43 +00:00
14084f13d8 build(deps): bump tokio from 1.37.0 to 1.38.0 (#176)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.37.0 to 1.38.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.37.0...tokio-1.38.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 19:23:35 +00:00
fbc953cf46 build(deps): bump actions/create-github-app-token from 1.10.0 to 1.10.1 (#177)
Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 1.10.0 to 1.10.1.
- [Release notes](https://github.com/actions/create-github-app-token/releases)
- [Commits](a0de6af839...c8f55efbd4)

---
updated-dependencies:
- dependency-name: actions/create-github-app-token
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 19:23:26 +00:00
fd7974fc98 build(deps): bump MarcoIeni/release-plz-action from 0.5.58 to 0.5.61 (#178)
Bumps [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action) from 0.5.58 to 0.5.61.
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](7fe60ae5d7...7566221bba)

---
updated-dependencies:
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 19:23:18 +00:00
d17769d69f build(deps): bump toml from 0.8.13 to 0.8.14 (#179)
Bumps [toml](https://github.com/toml-rs/toml) from 0.8.13 to 0.8.14.
- [Commits](https://github.com/toml-rs/toml/compare/toml-v0.8.13...toml-v0.8.14)

---
updated-dependencies:
- dependency-name: toml
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 19:23:11 +00:00
7ba04f26a3 fix(os): alpine v3.20 requires copying kernel config before grub-mkconfig (#180)
There is currently a bug in the Xen support for Alpine where /boot/config-lts
is expected to exist, but in Alpine /boot/config-VERSION-lts is the only file
available. This change copies the config to /boot/config-lts to fix the build.
2024-06-04 17:00:49 +00:00
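As a rough sketch of the workaround described above (the real fix lives in the OS build scripts; the helper below is hypothetical), the idea is simply to find the versioned config and copy it to the path grub-mkconfig expects:

```rust
use std::fs;
use std::io;
use std::path::Path;

// Hypothetical sketch: Alpine 3.20 ships /boot/config-<VERSION>-lts, but
// grub-mkconfig expects /boot/config-lts, so copy the versioned config into
// place before generating the grub configuration.
fn copy_kernel_config(boot: &Path) -> io::Result<()> {
    for entry in fs::read_dir(boot)? {
        let entry = entry?;
        let name = entry.file_name().to_string_lossy().into_owned();
        if name.starts_with("config-") && name.ends_with("-lts") && name != "config-lts" {
            fs::copy(entry.path(), boot.join("config-lts"))?;
            break;
        }
    }
    Ok(())
}
```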
11235b6837 --- (#168)
updated-dependencies:
- dependency-name: step-security/harden-runner
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-22 06:29:06 +00:00
e8849048db --- (#167)
updated-dependencies:
- dependency-name: prost-types
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-22 06:29:03 +00:00
cd15337ad8 build(deps): bump libc from 0.2.154 to 0.2.155 (#163)
Bumps [libc](https://github.com/rust-lang/libc) from 0.2.154 to 0.2.155.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.154...0.2.155)

---
updated-dependencies:
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-22 04:44:36 +00:00
037261991a build(deps): bump anyhow from 1.0.83 to 1.0.86 (#164)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.83 to 1.0.86.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.83...1.0.86)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-22 04:44:33 +00:00
67fb5891e4 build(deps): bump prost from 0.12.4 to 0.12.6 (#165)
Bumps [prost](https://github.com/tokio-rs/prost) from 0.12.4 to 0.12.6.
- [Release notes](https://github.com/tokio-rs/prost/releases)
- [Commits](https://github.com/tokio-rs/prost/compare/v0.12.4...v0.12.6)

---
updated-dependencies:
- dependency-name: prost
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-22 04:44:30 +00:00
d1f6d1e742 --- (#166)
updated-dependencies:
- dependency-name: ratatui
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-22 04:44:27 +00:00
18fc2c3a7e build(deps): bump thiserror from 1.0.60 to 1.0.61 (#162)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.60 to 1.0.61.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.60...1.0.61)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-22 04:44:24 +00:00
54486b119b build(deps): bump actions/checkout from 4.1.5 to 4.1.6 (#161)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.5 to 4.1.6.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](44c2b7a8a4...a5ac7e51b4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-17 17:48:51 +00:00
04a633d501 build(deps): bump MarcoIeni/release-plz-action from 0.5.57 to 0.5.58 (#152)
Bumps [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action) from 0.5.57 to 0.5.58.
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](a290444218...7fe60ae5d7)

---
updated-dependencies:
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-16 19:11:46 +00:00
612203f014 build(deps): bump serde from 1.0.201 to 1.0.202 (#154)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.201 to 1.0.202.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.201...v1.0.202)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-16 19:09:33 +00:00
e9ba336f68 build(deps): bump toml from 0.8.12 to 0.8.13 (#155)
Bumps [toml](https://github.com/toml-rs/toml) from 0.8.12 to 0.8.13.
- [Commits](https://github.com/toml-rs/toml/compare/toml-v0.8.12...toml-v0.8.13)

---
updated-dependencies:
- dependency-name: toml
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-16 19:09:14 +00:00
94790ce7dc fix(build): kernel fetch should use host target (#159) 2024-05-16 18:13:04 +00:00
023063327f fix(build): use host resolv.conf in os build chroot (#153)
The resolv.conf that the stage1 os script generates is fine for actual use,
but our GitHub workflows now use the Step Security hardened runner action.
That action replaces the nameserver so that all lookups go through it, and
because the chroot calls apk add, it needs to contact the internet. On the
GitHub workflows, the OS build currently fails because the hardened runner
cannot reach any other nameserver.
2024-05-16 08:41:42 +00:00
d46aa878af feat(build): fetch kernels from image registry instead of building the kernel (#156)
Now that we have the kernel build infrastructure at https://github.com/edera-dev/kernels,
it makes sense to stop building the kernel here and download the kernel images directly.

This change introduces a ./hack/kernel/fetch.sh script, which is backed by crates/build.
We utilize the OCI infrastructure itself to download the kernel image. The DEV guide
has been updated to include calling the fetch script, and the OS builder now uses this
method instead. Since the kernel build infrastructure no longer needs to live in this
repository, it has also been removed. This should significantly speed up full builds.

This change will also enable us to turn on os build workflows for all PRs. We should
likely make the OS status checks required once this is merged.
2024-05-16 08:40:58 +00:00
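The commit above relies on pulling the kernel image over the standard OCI distribution API. The snippet below is a minimal sketch of that idea, not the actual crates/build implementation: it fetches an image manifest with reqwest (already a workspace dependency), ignores registry authentication, and the registry and repository parameters are placeholders.

```rust
use anyhow::Result;
use serde_json::Value;

// Hypothetical sketch: fetch an OCI image manifest over the distribution API.
// The real fetch logic in crates/build also resolves the layer that carries
// the kernel image and unpacks it locally.
async fn fetch_manifest(registry: &str, repository: &str, reference: &str) -> Result<Value> {
    let url = format!("https://{registry}/v2/{repository}/manifests/{reference}");
    let manifest: Value = reqwest::Client::new()
        .get(url)
        // Ask for an OCI image manifest rather than a legacy Docker schema.
        .header("Accept", "application/vnd.oci.image.manifest.v1+json")
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    Ok(manifest)
}
```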
2462a99fdc chore(dependabot): group some dependency updates (#157)
We need to ensure strong security while also ensuring that dependabot does
not constantly open multiple PRs. After all, when something becomes too
time-consuming it risks not being handled with care. With grouped updates,
version bumps will be grouped together, but security updates will still be
individualized. This makes it safer for us to enable grouped dependency updates.
2024-05-16 08:39:50 +00:00
fc18bc6a18 feat(runtime): concurrent ip allocation (#151)
Previously, the krata runtime allowed only a single permit when performing operations.
This was necessary because the only IP allocation storage was xenstore, and
the commit of xenstore data happens after allocation. This commit introduces
IpVendor, a service which vends IPv4 and IPv6 addresses to guests using a
linear address strategy within an IP network space. The IpVendor table is
initialized from xenstore, and from there on out, the in-memory table
is the source of truth. This implementation is not perfect, but it will allow
us to lift the single permit limit, allowing guests to start concurrently.
2024-05-14 18:29:12 +00:00
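As a minimal sketch of the linear allocation strategy described above (the names and structure are hypothetical and much simpler than the real IpVendor, which also handles IPv6, xenstore initialization, and locking), allocation can be a walk over the host range of the network, handing out the first free address:

```rust
use std::collections::HashMap;
use std::net::Ipv4Addr;
use uuid::Uuid;

// Hypothetical, simplified IPv4-only vendor over an assumed /24 network.
struct IpVendor {
    network: Ipv4Addr,                 // e.g. 10.75.80.0
    assigned: HashMap<Ipv4Addr, Uuid>, // address -> guest
}

impl IpVendor {
    fn allocate(&mut self, guest: Uuid) -> Option<Ipv4Addr> {
        let base = u32::from(self.network);
        // Linear scan of the host range, skipping .0 (network) and .255 (broadcast).
        for host in 1..255u32 {
            let candidate = Ipv4Addr::from(base + host);
            if !self.assigned.contains_key(&candidate) {
                self.assigned.insert(candidate, guest);
                return Some(candidate);
            }
        }
        None
    }
}
```

Because the table lives in memory after initialization, allocations no longer have to serialize on a xenstore commit, which is what allows lifting the single-permit limit.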
b0f0934fa4 build(deps): bump async-compression from 0.4.9 to 0.4.10 (#145)
Bumps [async-compression](https://github.com/Nullus157/async-compression) from 0.4.9 to 0.4.10.
- [Release notes](https://github.com/Nullus157/async-compression/releases)
- [Changelog](https://github.com/Nullus157/async-compression/blob/main/CHANGELOG.md)
- [Commits](https://github.com/Nullus157/async-compression/commits/v0.4.10)

---
updated-dependencies:
- dependency-name: async-compression
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-13 19:23:40 +00:00
f6721d5e2c build(deps): bump actions/checkout from 4.1.4 to 4.1.5 (#149)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.4 to 4.1.5.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](0ad4b8fada...44c2b7a8a4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-13 19:23:24 +00:00
0d43a8be54 build(deps): bump MarcoIeni/release-plz-action from 0.5.55 to 0.5.57 (#150)
Bumps [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action) from 0.5.55 to 0.5.57.
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](76e66a600f...a290444218)

---
updated-dependencies:
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-13 19:23:08 +00:00
0193921053 Pin actions to digests and introduce Step Security Harden Runners (#137)
Signed-off-by: Jed Salazar <jedsalazar@gmail.com>
2024-05-11 00:00:56 +00:00
485f6e8319 chore(kernel): upgrade to kernel 6.8.9 (#143) 2024-05-10 17:30:06 +00:00
09ee251c9e Fix typo and nit (#144)
Signed-off-by: Jed Salazar <jed@edera.dev>
2024-05-10 01:44:42 +00:00
75011ef8cb fix(oci): use mirror.gcr.io as a mirror to docker hub (#141) 2024-05-09 17:30:27 +00:00
69c7af5220 build(deps): bump thiserror from 1.0.59 to 1.0.60 (#135)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.59 to 1.0.60.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.59...1.0.60)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 19:14:17 +00:00
a364abe887 build(deps): bump anyhow from 1.0.82 to 1.0.83 (#136)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.82 to 1.0.83.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.82...1.0.83)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 19:14:00 +00:00
95accc6d3f build(deps): bump serde from 1.0.200 to 1.0.201 (#139)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.200 to 1.0.201.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.200...v1.0.201)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 19:13:44 +00:00
04fb6cce8e build(deps): bump serde_json from 1.0.116 to 1.0.117 (#140)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.116 to 1.0.117.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.116...v1.0.117)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 19:13:27 +00:00
5420214bdd build(deps): bump sysinfo from 0.30.11 to 0.30.12 (#131)
Bumps [sysinfo](https://github.com/GuillaumeGomez/sysinfo) from 0.30.11 to 0.30.12.
- [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GuillaumeGomez/sysinfo/commits)

---
updated-dependencies:
- dependency-name: sysinfo
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-08 19:13:11 +00:00
b4f26787d4 fix(oci): remove file size limit (#142)
The addons.squashfs file is often fairly large because the GPU modules contain a lot of code.
2024-05-08 19:09:33 +00:00
51dff0361d fix(xenclient): use a single transaction for device setup (#130) 2024-05-05 20:39:53 +00:00
3187830ff5 build(deps): bump serde from 1.0.199 to 1.0.200 (#129)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.199 to 1.0.200.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.199...v1.0.200)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-02 17:19:20 +00:00
338322619c build(deps): bump base64 from 0.22.0 to 0.22.1 (#128)
Bumps [base64](https://github.com/marshallpierce/rust-base64) from 0.22.0 to 0.22.1.
- [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.22.0...v0.22.1)

---
updated-dependencies:
- dependency-name: base64
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-01 16:27:53 +00:00
511f83bfd9 fix: dev guide should mention copying kernel addons (fixes #125) (#126) 2024-04-30 21:46:45 +00:00
b0f5c38fb0 build(deps): bump async-compression from 0.4.8 to 0.4.9 (#120)
Bumps [async-compression](https://github.com/Nullus157/async-compression) from 0.4.8 to 0.4.9.
- [Release notes](https://github.com/Nullus157/async-compression/releases)
- [Changelog](https://github.com/Nullus157/async-compression/blob/main/CHANGELOG.md)
- [Commits](https://github.com/Nullus157/async-compression/commits)

---
updated-dependencies:
- dependency-name: async-compression
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 16:31:41 +00:00
520018a86d build(deps): bump serde from 1.0.198 to 1.0.199 (#122)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.198 to 1.0.199.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.198...v1.0.199)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 16:31:20 +00:00
39c2e58fbc build(deps): bump flate2 from 1.0.28 to 1.0.30 (#123)
Bumps [flate2](https://github.com/rust-lang/flate2-rs) from 1.0.28 to 1.0.30.
- [Release notes](https://github.com/rust-lang/flate2-rs/releases)
- [Commits](https://github.com/rust-lang/flate2-rs/compare/1.0.28...1.0.30)

---
updated-dependencies:
- dependency-name: flate2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 16:31:00 +00:00
d1d6eb5c8b build(deps): bump libc from 0.2.153 to 0.2.154 (#124)
Bumps [libc](https://github.com/rust-lang/libc) from 0.2.153 to 0.2.154.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.153...0.2.154)

---
updated-dependencies:
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 16:30:44 +00:00
84920a88ab feat: pci passthrough (#114)
* feat: pci passthrough

* feat: guest device management

* feat: addons mounting and kernel modules support

* feat: more pci work

* fix: kernel build squashfs fixes

* fix: e820entry should be available on all platforms
2024-04-29 17:02:20 +00:00
bece7f33c7 feat: CONTRIBUTING.md and Bug Report template (#117)
This change introduces an initial CONTRIBUTING.md doc and a template for
bug reports.

Signed-off-by: Khionu Sybiern <khionu@edera.dev>
2024-04-24 21:01:52 +00:00
95fbc62486 chore: release (#87)
Signed-off-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-04-23 09:41:56 +00:00
284ed8f17b feat: implement guest exec (#107) 2024-04-22 20:13:43 +00:00
82576df7b7 feat: implement kernel / initrd oci image support (#103)
* feat: implement kernel / initrd oci image support

* fix: implement image urls more faithfully
2024-04-22 19:48:45 +00:00
1b90eedbcd build(deps): bump thiserror from 1.0.58 to 1.0.59 (#104)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.58 to 1.0.59.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.58...1.0.59)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-22 05:36:20 +00:00
aa941c6e87 build(deps): bump redb from 2.0.0 to 2.1.0 (#106)
Bumps [redb](https://github.com/cberner/redb) from 2.0.0 to 2.1.0.
- [Release notes](https://github.com/cberner/redb/releases)
- [Changelog](https://github.com/cberner/redb/blob/master/CHANGELOG.md)
- [Commits](https://github.com/cberner/redb/compare/v2.0.0...v2.1.0)

---
updated-dependencies:
- dependency-name: redb
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-22 05:36:13 +00:00
d0bf3c4c77 build(deps): bump reqwest from 0.12.3 to 0.12.4 (#105)
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.12.3 to 0.12.4.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.12.3...v0.12.4)

---
updated-dependencies:
- dependency-name: reqwest
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-22 05:36:06 +00:00
38e892e249 feat: idm v2 (#102)
* feat: rebuild idm to separate transport from content

* feat: fast guest lookup table and host identification
2024-04-22 04:00:32 +00:00
1a90372037 build(deps): bump sysinfo from 0.30.10 to 0.30.11 (#99)
Bumps [sysinfo](https://github.com/GuillaumeGomez/sysinfo) from 0.30.10 to 0.30.11.
- [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GuillaumeGomez/sysinfo/compare/v0.30.10...v0.30.11)

---
updated-dependencies:
- dependency-name: sysinfo
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-19 20:35:57 +00:00
4754cdd128 build(deps): bump rustls from 0.22.3 to 0.22.4 (#101)
Bumps [rustls](https://github.com/rustls/rustls) from 0.22.3 to 0.22.4.
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.22.3...v/0.22.4)

---
updated-dependencies:
- dependency-name: rustls
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-19 20:35:44 +00:00
f843abcabf docs(dev): update order of setup instructions (#98)
This change corrects the step order so that the krata daemon is started
before the krata networking service.

Co-authored-by: Khionu Sybiern <khionu@edera.dev>
2024-04-18 18:00:48 +00:00
e8d89d4d5b build(deps): bump serde from 1.0.197 to 1.0.198 (#97)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.197 to 1.0.198.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.197...v1.0.198)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-17 05:17:00 +00:00
4e9738b959 fix: oci cache store should fallback to copy when rename won't work (#96) 2024-04-16 17:05:24 +00:00
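A minimal sketch of the fallback described in this fix (the helper below is hypothetical, not the actual oci cache store code): prefer an atomic rename, and if it fails, for example with EXDEV when the temporary file and the cache live on different filesystems, copy the contents and remove the original instead.

```rust
use std::fs;
use std::io;
use std::path::Path;

// Hypothetical sketch: try a rename first; fall back to copy + delete when
// the rename cannot work (typically across filesystem boundaries).
fn move_into_cache(src: &Path, dst: &Path) -> io::Result<()> {
    match fs::rename(src, dst) {
        Ok(()) => Ok(()),
        Err(_) => {
            fs::copy(src, dst)?;
            fs::remove_file(src)?;
            Ok(())
        }
    }
}
```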
8135307283 feat: oci concurrency improvements (#95)
* feat: implement improved and detailed oci progress indication

* feat: implement on-disk indexes of images

* oci: utilize rw-lock for increased cache performance
2024-04-16 16:29:54 +00:00
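One of the bullets above mentions switching the cache to a read-write lock. Below is a minimal sketch of that idea with tokio's RwLock (the index type is hypothetical, not krata-oci's actual structure): many concurrent pulls can read the index at once, and only writers take exclusive access.

```rust
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

// Hypothetical cache index handle: digest -> cached path.
#[derive(Clone, Default)]
struct ImageIndex {
    entries: Arc<RwLock<HashMap<String, String>>>,
}

impl ImageIndex {
    // Shared read access: concurrent pulls can look up entries in parallel.
    async fn lookup(&self, digest: &str) -> Option<String> {
        self.entries.read().await.get(digest).cloned()
    }

    // Exclusive write access, only taken when a new entry is added.
    async fn insert(&self, digest: String, path: String) {
        self.entries.write().await.insert(digest, path);
    }
}
```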
e450ebd2a2 feat: oci tar format, bit-perfect disk storage for config and manifest, concurrent image pulls (#88)
* oci: retain bit-perfect copies of manifest and config on disk

* feat: oci tar format support

* feat: concurrent image pulls
2024-04-16 08:53:44 +00:00
79f7742caa build(deps): bump serde_json from 1.0.115 to 1.0.116 (#90)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.115 to 1.0.116.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.115...v1.0.116)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-16 07:17:11 +00:00
c3c18271b4 build(deps): bump ratatui from 0.26.1 to 0.26.2 (#89)
Bumps [ratatui](https://github.com/ratatui-org/ratatui) from 0.26.1 to 0.26.2.
- [Release notes](https://github.com/ratatui-org/ratatui/releases)
- [Changelog](https://github.com/ratatui-org/ratatui/blob/main/CHANGELOG.md)
- [Commits](https://github.com/ratatui-org/ratatui/compare/v0.26.1...v0.26.2)

---
updated-dependencies:
- dependency-name: ratatui
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-16 07:16:53 +00:00
134 changed files with 7639 additions and 14752 deletions

.github/ISSUE_TEMPLATE/bug_report.yml (new file, 34 lines)

@ -0,0 +1,34 @@
name: Bug Report
description: File a bug report.
title: "bug: "
labels: ["bug"]
assignees:
  - edera-dev/engineering
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report!
  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: Also tell us, what did you expect to happen?
      placeholder: Tell us what you see!
      value: "A bug happened!"
    validations:
      required: true
  - type: input
    id: version
    attributes:
      label: Version / Commit
      description: What version of our software are you running?
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
      render: shell

View File

@ -4,7 +4,15 @@ updates:
directory: "/"
schedule:
interval: "daily"
groups:
version-updates:
dependency-type: "all"
applies-to: "version-updates"
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "daily"
groups:
version-updates:
dependency-type: "all"
applies-to: "version-updates"

View File

@ -11,10 +11,13 @@ jobs:
name: fmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
components: rustfmt
- run: ./hack/ci/install-linux-deps.sh
@ -23,7 +26,10 @@ jobs:
name: shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- run: ./hack/code/shellcheck.sh

View File

@ -27,18 +27,21 @@ jobs:
run:
shell: bash
steps:
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- run: git config --global core.autocrlf false && git config --global core.eol lf
if: ${{ matrix.platform.os == 'windows' }}
- uses: actions/checkout@v4
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
if: ${{ matrix.platform.os != 'darwin' }}
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
targets: "${{ matrix.platform.arch }}-apple-darwin"
if: ${{ matrix.platform.os == 'darwin' }}
- uses: homebrew/actions/setup-homebrew@master
- uses: homebrew/actions/setup-homebrew@4b34604e75af8f8b23b454f0b5ffb7c5d8ce0056 # master
if: ${{ matrix.platform.os == 'darwin' }}
- run: ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh
- run: ./hack/build/cargo.sh build --bin kratactl

View File

@ -1,32 +0,0 @@
name: kernel
on:
pull_request:
branches:
- main
paths:
- "kernel/**"
- "hack/ci/**"
merge_group:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: kernel build ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/kernel/build.sh
env:
KRATA_KERNEL_BUILD_JOBS: "5"

View File

@ -16,25 +16,24 @@ jobs:
TARGET_ARCH: "${{ matrix.arch }}"
name: nightly server ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/dist/bundle.sh
env:
KRATA_KERNEL_BUILD_JOBS: "5"
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: krata-bundle-systemd-${{ matrix.arch }}
path: "target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
compression-level: 0
- run: ./hack/dist/deb.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: krata-debian-${{ matrix.arch }}
path: "target/dist/*.deb"
@ -42,15 +41,13 @@ jobs:
- run: ./hack/dist/apk.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: krata-alpine-${{ matrix.arch }}
path: "target/dist/*_${{ matrix.arch }}.apk"
compression-level: 0
- run: ./hack/os/build.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: krata-os-${{ matrix.arch }}
path: "target/os/krata-${{ matrix.arch }}.qcow2"
@ -75,27 +72,30 @@ jobs:
run:
shell: bash
steps:
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- run: git config --global core.autocrlf false && git config --global core.eol lf
if: ${{ matrix.platform.os == 'windows' }}
- uses: actions/checkout@v4
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
if: ${{ matrix.platform.os != 'darwin' }}
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
targets: "${{ matrix.platform.arch }}-apple-darwin"
if: ${{ matrix.platform.os == 'darwin' }}
- uses: homebrew/actions/setup-homebrew@master
- uses: homebrew/actions/setup-homebrew@4b34604e75af8f8b23b454f0b5ffb7c5d8ce0056 # master
if: ${{ matrix.platform.os == 'darwin' }}
- run: ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh
- run: ./hack/build/cargo.sh build --release --bin kratactl
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl"
if: ${{ matrix.platform.os != 'windows' }}
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl.exe"

View File

@ -3,10 +3,6 @@ on:
pull_request:
branches:
- main
paths:
- "os/**"
- "hack/os/**"
- "hack/ci/**"
merge_group:
branches:
- main
@ -23,17 +19,18 @@ jobs:
TARGET_ARCH: "${{ matrix.arch }}"
name: os build ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/os/build.sh
env:
KRATA_KERNEL_BUILD_JOBS: "5"
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: krata-os-${{ matrix.arch }}
path: "target/os/krata-${{ matrix.arch }}.qcow2"

View File

@ -25,28 +25,23 @@ jobs:
TARGET_ARCH: "${{ matrix.arch }}"
name: release-binaries server ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/dist/bundle.sh
env:
KRATA_KERNEL_BUILD_JOBS: "5"
- run: "./hack/ci/assemble-release-assets.sh bundle-systemd ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
- run: ./hack/dist/deb.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- run: "./hack/ci/assemble-release-assets.sh debian ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/*.deb"
- run: ./hack/dist/apk.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- run: "./hack/ci/assemble-release-assets.sh alpine ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/*_${{ matrix.arch }}.apk"
- run: ./hack/os/build.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- run: "./hack/ci/assemble-release-assets.sh os ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/os/krata-${{ matrix.arch }}.qcow2"
- run: "./hack/ci/upload-release-assets.sh ${{ github.event.release.tag_name }}"
env:
@ -72,16 +67,19 @@ jobs:
shell: bash
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
if: ${{ matrix.platform.os != 'darwin' }}
- uses: dtolnay/rust-toolchain@stable
with:
targets: "${{ matrix.platform.arch }}-apple-darwin"
if: ${{ matrix.platform.os == 'darwin' }}
- uses: homebrew/actions/setup-homebrew@master
- uses: homebrew/actions/setup-homebrew@4b34604e75af8f8b23b454f0b5ffb7c5d8ce0056 # master
if: ${{ matrix.platform.os == 'darwin' }}
- run: ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh
- run: ./hack/build/cargo.sh build --release --bin kratactl

View File

@ -14,20 +14,23 @@ jobs:
name: release-plz
runs-on: ubuntu-latest
steps:
- uses: actions/create-github-app-token@v1
id: generate-token
with:
app-id: "${{ secrets.EDERA_CULTIVATION_APP_ID }}"
private-key: "${{ secrets.EDERA_CULTIVATION_APP_PRIVATE_KEY }}"
- uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
token: "${{ steps.generate-token.outputs.token }}"
- uses: dtolnay/rust-toolchain@stable
- run: ./hack/ci/install-linux-deps.sh
- name: release-plz
uses: MarcoIeni/release-plz-action@v0.5
env:
GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}"
CARGO_REGISTRY_TOKEN: "${{ secrets.KRATA_RELEASE_CARGO_TOKEN }}"
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/create-github-app-token@c8f55efbd427e7465d6da1106e7979bc8aaee856 # v1.10.1
id: generate-token
with:
app-id: "${{ secrets.EDERA_CULTIVATION_APP_ID }}"
private-key: "${{ secrets.EDERA_CULTIVATION_APP_PRIVATE_KEY }}"
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
fetch-depth: 0
token: "${{ steps.generate-token.outputs.token }}"
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
- run: ./hack/ci/install-linux-deps.sh
- name: release-plz
uses: MarcoIeni/release-plz-action@86afd21a7b114234aab55ba0005eed52f77d89e4 # v0.5.62
env:
GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}"
CARGO_REGISTRY_TOKEN: "${{ secrets.KRATA_RELEASE_CARGO_TOKEN }}"

View File

@ -19,10 +19,13 @@ jobs:
TARGET_ARCH: "${{ matrix.arch }}"
name: server build ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/build/cargo.sh build
test:
@ -36,10 +39,13 @@ jobs:
TARGET_ARCH: "${{ matrix.arch }}"
name: server test ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/build/cargo.sh test
clippy:
@ -53,10 +59,13 @@ jobs:
TARGET_ARCH: "${{ matrix.arch }}"
name: server clippy ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
components: clippy
- run: ./hack/ci/install-linux-deps.sh
@ -72,10 +81,13 @@ jobs:
TARGET_ARCH: "${{ matrix.arch }}"
name: server initrd ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
with:
egress-policy: audit
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- uses: dtolnay/rust-toolchain@d388a4836fcdbde0e50e395dc79a2670ccdef13f # stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"
- run: ./hack/ci/install-linux-deps.sh

.gitmodules (deleted, 3 lines)

@ -1,3 +0,0 @@
[submodule "vendor"]
path = vendor
url = https://github.com/edera-dev/krata-vendor.git

View File

@ -6,6 +6,37 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.0.11](https://github.com/edera-dev/krata/compare/v0.0.10...v0.0.11) - 2024-06-23
### Added
- pci passthrough ([#114](https://github.com/edera-dev/krata/pull/114))
- *(runtime)* concurrent ip allocation ([#151](https://github.com/edera-dev/krata/pull/151))
- *(xen)* dynamic platform architecture ([#194](https://github.com/edera-dev/krata/pull/194))
### Fixed
- *(oci)* remove file size limit ([#142](https://github.com/edera-dev/krata/pull/142))
- *(oci)* use mirror.gcr.io as a mirror to docker hub ([#141](https://github.com/edera-dev/krata/pull/141))
### Other
- first pass of krata as an isolation engine
- *(xen)* split platform support into separate crate ([#195](https://github.com/edera-dev/krata/pull/195))
- *(xen)* move device creation into transaction interface ([#196](https://github.com/edera-dev/krata/pull/196))
## [0.0.10](https://github.com/edera-dev/krata/compare/v0.0.9...v0.0.10) - 2024-04-22
### Added
- implement guest exec ([#107](https://github.com/edera-dev/krata/pull/107))
- implement kernel / initrd oci image support ([#103](https://github.com/edera-dev/krata/pull/103))
- idm v2 ([#102](https://github.com/edera-dev/krata/pull/102))
- oci concurrency improvements ([#95](https://github.com/edera-dev/krata/pull/95))
- oci tar format, bit-perfect disk storage for config and manifest, concurrent image pulls ([#88](https://github.com/edera-dev/krata/pull/88))
### Fixed
- oci cache store should fallback to copy when rename won't work ([#96](https://github.com/edera-dev/krata/pull/96))
### Other
- update Cargo.lock dependencies
## [0.0.9](https://github.com/edera-dev/krata/compare/v0.0.8...v0.0.9) - 2024-04-15
### Added

CONTRIBUTING.md (new file, 34 lines)

@ -0,0 +1,34 @@
# Contributing to Krata
Welcome! We're very glad you're reading this; Edera is excited for all kinds of contributions! Please read the following to ensure you're aware of our flow and policies.
## Before contributing
1. Please read our [Code of Conduct](CODE_OF_CONDUCT.md), which applies to all interactions in/with all Edera projects and venues.
2. Before opening an issue or PR, please try a few searches to see if there is overlap with existing conversations or WIP contributions.
3. For security or otherwise sensitive topics, please read our [Security Policy].
4. Ask questions! If you want to ask something, chances are someone else wants to ask it as well.
## Contributing Code
To get started with technical contributions, please read our [Development Guide]. If you're looking for something easy to tackle, [look for issues labeled `good first issue`][good-first-issues].
## Reporting bugs and other issues
While it's totally fine to simply bring it up on our Discord, we encourage opening an issue on GitHub using the Bug Report template.
## Pull Requests
1. For anything more than simple bug/doc fixes, please open a GitHub issue for tracking purposes.
- Else skip to step 3.
2. Discuss the change with the teams to ensure we have consensus on the change being welcome.
3. We encourage opening the PR sooner than later, and prefixing with `WIP:` so GitHub labels it as a Draft.
4. Please include a detailed list of changes that the PR makes
5. Once the PR is ready for review, remove the Draft status, and request a review from `edera-dev/engineering`.
6. After the review cycle concludes and we know you are ready for merging, a team member will submit the PR to the merge queue.
[Code of Conduct]: ./CODE_OF_CONDUCT.md
[Security Policy]: ./SECURITY.md
[Development Guide]: ./DEV.md
[good-first-issues]: https://github.com/edera-dev/krata/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22

Cargo.lock (generated, 324 lines changed)

@ -94,9 +94,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.82"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "arrayvec"
@ -109,9 +109,9 @@ dependencies = [
[[package]]
name = "async-compression"
version = "0.4.8"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60"
checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5"
dependencies = [
"flate2",
"futures-core",
@ -245,9 +245,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "base64"
version = "0.22.0"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "bindgen"
@ -320,6 +320,26 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
[[package]]
name = "c2rust-bitfields"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b43c3f07ab0ef604fa6f595aa46ec2f8a22172c975e186f6f5bf9829a3b72c41"
dependencies = [
"c2rust-bitfields-derive",
]
[[package]]
name = "c2rust-bitfields-derive"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3cbc102e2597c9744c8bd8c15915d554300601c91a079430d309816b0912545"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "cassowary"
version = "0.3.0"
@ -366,6 +386,12 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "cgroups-rs"
version = "0.3.4"
@ -398,9 +424,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.4"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f"
dependencies = [
"clap_builder",
"clap_derive",
@ -408,9 +434,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.2"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f"
dependencies = [
"anstream",
"anstyle",
@ -420,9 +446,9 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.5.4"
version = "4.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6"
dependencies = [
"heck 0.5.0",
"proc-macro2",
@ -844,9 +870,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flate2"
version = "1.0.28"
version = "1.0.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae"
dependencies = [
"crc32fast",
"miniz_oxide",
@ -1306,12 +1332,6 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "indoc"
version = "2.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5"
[[package]]
name = "instant"
version = "0.1.12"
@ -1371,7 +1391,7 @@ dependencies = [
[[package]]
name = "krata"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"anyhow",
"async-trait",
@ -1406,12 +1426,28 @@ dependencies = [
"serde",
]
[[package]]
name = "krata-buildtools"
version = "0.0.11"
dependencies = [
"anyhow",
"env_logger",
"krata-oci",
"krata-tokio-tar",
"oci-spec",
"scopeguard",
"tokio",
"tokio-stream",
"uuid",
]
[[package]]
name = "krata-ctl"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"anyhow",
"async-stream",
"base64 0.22.1",
"clap",
"comfy-table",
"crossterm",
@ -1425,6 +1461,7 @@ dependencies = [
"prost-reflect",
"prost-types",
"ratatui",
"serde",
"serde_json",
"serde_yaml",
"termtree",
@ -1436,7 +1473,7 @@ dependencies = [
[[package]]
name = "krata-daemon"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"anyhow",
"async-stream",
@ -1449,19 +1486,23 @@ dependencies = [
"krata",
"krata-oci",
"krata-runtime",
"krata-tokio-tar",
"log",
"prost",
"redb",
"scopeguard",
"serde",
"signal-hook",
"tokio",
"tokio-stream",
"toml",
"tonic",
"uuid",
]
[[package]]
name = "krata-guest"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"anyhow",
"cgroups-rs",
@ -1475,6 +1516,7 @@ dependencies = [
"nix 0.28.0",
"oci-spec",
"path-absolutize",
"platform-info",
"rtnetlink",
"serde",
"serde_json",
@ -1485,7 +1527,7 @@ dependencies = [
[[package]]
name = "krata-network"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"anyhow",
"async-trait",
@ -1509,7 +1551,7 @@ dependencies = [
[[package]]
name = "krata-oci"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"anyhow",
"async-compression",
@ -1536,7 +1578,7 @@ dependencies = [
[[package]]
name = "krata-runtime"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"anyhow",
"backhand",
@ -1548,12 +1590,14 @@ dependencies = [
"krata-xenclient",
"krata-xenevtchn",
"krata-xengnt",
"krata-xenplatform",
"krata-xenstore",
"log",
"loopdev-3",
"serde_json",
"tokio",
"uuid",
"walkdir",
]
[[package]]
@ -1573,7 +1617,7 @@ dependencies = [
[[package]]
name = "krata-xencall"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"env_logger",
"libc",
@ -1586,28 +1630,25 @@ dependencies = [
[[package]]
name = "krata-xenclient"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"async-trait",
"elf",
"env_logger",
"flate2",
"indexmap 2.2.6",
"krata-xencall",
"krata-xenplatform",
"krata-xenstore",
"libc",
"log",
"memchr",
"nix 0.28.0",
"slice-copy",
"regex",
"thiserror",
"tokio",
"uuid",
"xz2",
]
[[package]]
name = "krata-xenevtchn"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"libc",
"log",
@ -1618,16 +1659,39 @@ dependencies = [
[[package]]
name = "krata-xengnt"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"libc",
"nix 0.28.0",
"thiserror",
]
[[package]]
name = "krata-xenplatform"
version = "0.0.11"
dependencies = [
"async-trait",
"c2rust-bitfields",
"elf",
"env_logger",
"flate2",
"indexmap 2.2.6",
"krata-xencall",
"libc",
"log",
"memchr",
"nix 0.28.0",
"regex",
"slice-copy",
"thiserror",
"tokio",
"uuid",
"xz2",
]
[[package]]
name = "krata-xenstore"
version = "0.0.9"
version = "0.0.11"
dependencies = [
"byteorder",
"env_logger",
@ -1651,9 +1715,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "libc"
version = "0.2.153"
version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "libloading"
@ -1871,7 +1935,19 @@ checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
dependencies = [
"bitflags 2.5.0",
"cfg-if",
"cfg_aliases",
"cfg_aliases 0.1.1",
"libc",
]
[[package]]
name = "nix"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
dependencies = [
"bitflags 2.5.0",
"cfg-if",
"cfg_aliases 0.2.1",
"libc",
]
@ -2063,6 +2139,16 @@ version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
[[package]]
name = "platform-info"
version = "2.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5ff316b9c4642feda973c18f0decd6c8b0919d4722566f6e4337cce0dd88217"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "portable-atomic"
version = "1.6.0"
@ -2092,7 +2178,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
dependencies = [
"once_cell",
"toml_edit",
"toml_edit 0.19.15",
]
[[package]]
@ -2130,9 +2216,9 @@ dependencies = [
[[package]]
name = "prost"
version = "0.12.4"
version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922"
checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29"
dependencies = [
"bytes",
"prost-derive",
@ -2140,9 +2226,9 @@ dependencies = [
[[package]]
name = "prost-build"
version = "0.12.4"
version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1"
checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4"
dependencies = [
"bytes",
"heck 0.5.0",
@ -2161,9 +2247,9 @@ dependencies = [
[[package]]
name = "prost-derive"
version = "0.12.4"
version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48"
checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1"
dependencies = [
"anyhow",
"itertools",
@ -2178,7 +2264,7 @@ version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f5eec97d5d34bdd17ad2db2219aabf46b054c6c41bd5529767c9ce55be5898f"
dependencies = [
"base64 0.22.0",
"base64 0.22.1",
"once_cell",
"prost",
"prost-reflect-derive",
@ -2210,9 +2296,9 @@ dependencies = [
[[package]]
name = "prost-types"
version = "0.12.4"
version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe"
checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0"
dependencies = [
"prost",
]
@ -2264,21 +2350,21 @@ dependencies = [
[[package]]
name = "ratatui"
version = "0.26.1"
version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcb12f8fbf6c62614b0d56eb352af54f6a22410c3b079eb53ee93c7b97dd31d8"
checksum = "f44c9e68fd46eda15c646fbb85e1040b657a58cdc8c98db1d97a55930d991eef"
dependencies = [
"bitflags 2.5.0",
"cassowary",
"compact_str",
"crossterm",
"indoc",
"itertools",
"lru",
"paste",
"stability",
"strum",
"unicode-segmentation",
"unicode-truncate",
"unicode-width",
]
@ -2304,9 +2390,9 @@ dependencies = [
[[package]]
name = "redb"
version = "2.0.0"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1100a056c5dcdd4e5513d5333385223b26ef1bf92f31eb38f407e8c20549256"
checksum = "a6dd20d3cdeb9c7d2366a0b16b93b35b75aec15309fbeb7ce477138c9f68c8c0"
dependencies = [
"libc",
]
@ -2331,9 +2417,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.10.4"
version = "1.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
dependencies = [
"aho-corasick",
"memchr",
@ -2360,11 +2446,11 @@ checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "reqwest"
version = "0.12.3"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e6cc1e89e689536eb5aeede61520e874df5a4707df811cd5da4aa5fbb2aae19"
checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10"
dependencies = [
"base64 0.22.0",
"base64 0.22.1",
"bytes",
"futures-core",
"futures-util",
@ -2459,9 +2545,9 @@ dependencies = [
[[package]]
name = "rustls"
version = "0.22.3"
version = "0.22.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c"
checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
dependencies = [
"log",
"ring",
@ -2527,9 +2613,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.197"
version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
dependencies = [
"serde_derive",
]
@ -2546,9 +2632,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.197"
version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
dependencies = [
"proc-macro2",
"quote",
@ -2557,15 +2643,24 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.115"
version = "1.0.117"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd"
checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_spanned"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0"
dependencies = [
"serde",
]
[[package]]
name = "serde_urlencoded"
version = "0.7.1"
@ -2717,12 +2812,12 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
[[package]]
name = "stability"
version = "0.1.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce"
checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a"
dependencies = [
"quote",
"syn 1.0.109",
"syn 2.0.57",
]
[[package]]
@ -2820,9 +2915,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.30.10"
version = "0.30.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d7c217777061d5a2d652aea771fb9ba98b6dade657204b08c4b9604d11555b"
checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae"
dependencies = [
"cfg-if",
"core-foundation-sys",
@ -2859,18 +2954,18 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "thiserror"
version = "1.0.58"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.58"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
dependencies = [
"proc-macro2",
"quote",
@ -2894,9 +2989,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.37.0"
version = "1.38.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
dependencies = [
"backtrace",
"bytes",
@ -2923,9 +3018,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
version = "2.2.0"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
dependencies = [
"proc-macro2",
"quote",
@ -2956,12 +3051,12 @@ dependencies = [
[[package]]
name = "tokio-tun"
version = "0.11.4"
version = "0.11.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65d79912ba514490b1f5a574b585e19082bd2a6b238970c87c57a66bd77206b5"
checksum = "68f5381752d5832fc811f89d54fc334951aa435022f494190ba7151661f206df"
dependencies = [
"libc",
"nix 0.28.0",
"nix 0.29.0",
"thiserror",
"tokio",
]
@ -2981,10 +3076,25 @@ dependencies = [
]
[[package]]
name = "toml_datetime"
version = "0.6.5"
name = "toml"
version = "0.8.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit 0.22.14",
]
[[package]]
name = "toml_datetime"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
@ -2994,7 +3104,20 @@ checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
"indexmap 2.2.6",
"toml_datetime",
"winnow",
"winnow 0.5.40",
]
[[package]]
name = "toml_edit"
version = "0.22.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38"
dependencies = [
"indexmap 2.2.6",
"serde",
"serde_spanned",
"toml_datetime",
"winnow 0.6.6",
]
[[package]]
@ -3154,6 +3277,16 @@ version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
[[package]]
name = "unicode-truncate"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5fbabedabe362c618c714dbefda9927b5afc8e2a8102f47f081089a9019226"
dependencies = [
"itertools",
"unicode-width",
]
[[package]]
name = "unicode-width"
version = "0.1.11"
@ -3174,9 +3307,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "url"
version = "2.5.0"
version = "2.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633"
checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c"
dependencies = [
"form_urlencoded",
"idna",
@ -3505,6 +3638,15 @@ dependencies = [
"memchr",
]
[[package]]
name = "winnow"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352"
dependencies = [
"memchr",
]
[[package]]
name = "winreg"
version = "0.52.0"


@ -1,5 +1,6 @@
[workspace]
members = [
"crates/build",
"crates/krata",
"crates/oci",
"crates/guest",
@ -11,12 +12,13 @@ members = [
"crates/xen/xenclient",
"crates/xen/xenevtchn",
"crates/xen/xengnt",
"crates/xen/xenplatform",
"crates/xen/xenstore",
]
resolver = "2"
[workspace.package]
version = "0.0.9"
version = "0.0.11"
homepage = "https://krata.dev"
license = "Apache-2.0"
repository = "https://github.com/edera-dev/krata"
@ -24,12 +26,14 @@ repository = "https://github.com/edera-dev/krata"
[workspace.dependencies]
anyhow = "1.0"
arrayvec = "0.7.4"
async-compression = "0.4.8"
async-compression = "0.4.11"
async-stream = "0.3.5"
async-trait = "0.1.80"
backhand = "0.15.0"
base64 = "0.22.1"
byteorder = "1"
bytes = "1.5.0"
c2rust-bitfields = "0.18.0"
cgroups-rs = "0.3.4"
circular-buffer = "0.1.7"
comfy-table = "7.1.1"
@ -56,34 +60,37 @@ oci-spec = "0.6.4"
once_cell = "1.19.0"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
prost = "0.12.4"
prost-build = "0.12.4"
platform-info = "2.0.3"
prost = "0.12.6"
prost-build = "0.12.6"
prost-reflect-build = "0.13.0"
prost-types = "0.12.4"
prost-types = "0.12.6"
rand = "0.8.5"
ratatui = "0.26.1"
redb = "2.0.0"
ratatui = "0.26.3"
redb = "2.1.1"
regex = "1.10.5"
rtnetlink = "0.14.1"
scopeguard = "1.2.0"
serde_json = "1.0.113"
serde_json = "1.0.117"
serde_yaml = "0.9"
sha256 = "1.5.0"
signal-hook = "0.3.17"
slice-copy = "0.3.0"
smoltcp = "0.11.0"
sysinfo = "0.30.10"
sysinfo = "0.30.12"
termtree = "0.4.1"
thiserror = "1.0"
tokio-tun = "0.11.4"
tokio-tun = "0.11.5"
toml = "0.8.14"
tonic-build = "0.11.0"
tower = "0.4.13"
udp-stream = "0.0.11"
url = "2.5.0"
url = "2.5.2"
walkdir = "2"
xz2 = "0.1"
[workspace.dependencies.clap]
version = "4.4.18"
version = "4.5.7"
features = ["derive"]
[workspace.dependencies.prost-reflect]
@ -91,12 +98,12 @@ version = "0.13.1"
features = ["derive"]
[workspace.dependencies.reqwest]
version = "0.12.3"
version = "0.12.4"
default-features = false
features = ["rustls-tls"]
[workspace.dependencies.serde]
version = "1.0.196"
version = "1.0.203"
features = ["derive"]
[workspace.dependencies.sys-mount]
@ -104,7 +111,7 @@ version = "3.0.0"
default-features = false
[workspace.dependencies.tokio]
version = "1.35.1"
version = "1.38.0"
features = ["full"]
[workspace.dependencies.tokio-stream]

DEV.md

@ -19,7 +19,7 @@ its corresponding code path from the above table.
| Component | Specification | Notes |
| ------------- | ------------- | --------------------------------------------------------------------------------- |
| Architecture | x86_64 | aarch64 support is still in development |
| Memory | At least 6GB | dom0 will need to be configured will lower memory limit to give krata guests room |
| Memory | At least 6GB | dom0 will need to be configured with lower memory limit to give krata guests room |
| Xen | 4.17 | Temporary due to hardcoded interface version constants |
| Debian | stable / sid | Debian is recommended due to the ease of Xen setup |
| rustup | any | Install Rustup from https://rustup.rs |
@ -31,7 +31,9 @@ its corresponding code path from the above table.
2. Install required packages:
```sh
$ apt install git xen-system-amd64 build-essential libclang-dev musl-tools flex bison libelf-dev libssl-dev bc protobuf-compiler libprotobuf-dev squashfs-tools erofs-utils
$ apt install git xen-system-amd64 build-essential \
libclang-dev musl-tools flex bison libelf-dev libssl-dev bc \
protobuf-compiler libprotobuf-dev squashfs-tools erofs-utils
```
3. Install [rustup](https://rustup.rs) for managing a Rust environment.
@ -62,16 +64,23 @@ $ git clone https://github.com/edera-dev/krata.git krata
$ cd krata
```
6. Build a guest kernel image:
6. Fetch the guest kernel image:
```sh
$ ./hack/kernel/build.sh
$ ./hack/kernel/fetch.sh -u
```
7. Copy the guest kernel image at `target/kernel/kernel-x86_64` to `/var/lib/krata/guest/kernel` to have it automatically detected by kratad.
8. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
9. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
10. Run kratactl to launch a guest:
7. Copy the guest kernel artifacts to `/var/lib/krata/guest/kernel` so it is automatically detected by kratad:
```sh
$ mkdir -p /var/lib/krata/guest
$ cp target/kernel/kernel-x86_64 /var/lib/krata/guest/kernel
$ cp target/kernel/addons-x86_64.squashfs /var/lib/krata/guest/addons.squashfs
```
8. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
9. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
10. Run `kratactl` to launch a guest:
```sh
$ ./hack/debug/kratactl.sh launch --attach alpine:latest

FAQ.md

@ -2,7 +2,7 @@
## How does krata currently work?
The krata hypervisor makes it possible to launch OCI containers on a Xen hypervisor without utilizing the Xen userspace tooling. krata contains just enough of the userspace of Xen (reimplemented in Rust) to start an x86_64 Xen Linux PV guest, and implements a Linux init process that can boot an OCI container. It does so by converting an OCI image into a squashfs/erofs file and packaging basic startup data in a bundle which the init container can read.
The krata isolation engine makes it possible to launch OCI containers on a Xen hypervisor without utilizing the Xen userspace tooling. krata contains just enough of the userspace of Xen (reimplemented in Rust) to start an x86_64 Xen Linux PV guest, and implements a Linux init process that can boot an OCI container. It does so by converting an OCI image into a squashfs/erofs file and packaging basic startup data in a bundle which the init container can read.
In addition, due to the desire to reduce dependence on the dom0 network, krata contains a networking daemon called kratanet. kratanet listens for krata guests to start up and launches a userspace networking environment. krata guests can access the dom0 networking stack via the proxynat layer that makes it possible to communicate over UDP, TCP, and ICMP (echo only) to the outside world. Each krata guest is also provided a "gateway" IP (both IPv4 and IPv6) which utilizes smoltcp to provide a virtual host. In the future, that virtual host could dial connections into the container to access container networking resources.
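For reference, here is a minimal sketch of the image-conversion step described above, driving the `krataoci` packer the same way the new `build-fetch-kernel` tool in this changeset does. This is an illustration rather than code from the repository: the image reference is a placeholder, and the `OciPackedFormat::Squashfs` variant is assumed to exist alongside the `Tar` variant actually used by the fetch tool, based on the squashfs/erofs/tar options exposed by the CLI.

```rust
use anyhow::Result;
use krataoci::{
    name::ImageName,
    packer::{service::OciPackerService, OciPackedFormat},
    progress::OciProgressContext,
    registry::OciPlatform,
};
use oci_spec::image::{Arch, Os};
use tokio::fs;

#[tokio::main]
async fn main() -> Result<()> {
    // Scratch directory for downloaded layers and intermediate artifacts.
    let cache_dir = std::env::temp_dir().join("krata-oci-cache");
    fs::create_dir_all(&cache_dir).await?;

    // Placeholder image reference; any name accepted by ImageName::parse works here.
    let image = ImageName::parse("docker.io/library/alpine:latest")?;

    // Progress updates are streamed through the context; the receiver is dropped here.
    let (progress, _) = OciProgressContext::create();

    // Pack the image for a Linux x86_64 guest into a root filesystem image.
    // Squashfs output is an assumption; build-fetch-kernel itself requests Tar.
    let service =
        OciPackerService::new(None, &cache_dir, OciPlatform::new(Os::Linux, Arch::Amd64)).await?;
    let packed = service
        .request(image, OciPackedFormat::Squashfs, false, progress)
        .await?;

    println!("packed root image written to {:?}", packed.path);
    Ok(())
}
```

The `kratactl pull` and `kratactl launch` commands updated in this changeset request the same formats over the control API, so the packed output here corresponds to what the daemon hands to a guest at launch.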
@ -13,7 +13,3 @@ Xen is a very interesting technology, and Edera believes that type-1 hypervisors
## Why not utilize pvcalls to provide access to the host network?
pvcalls is extremely interesting, and although it is certainly possible to utilize pvcalls to get the job done, we chose to utilize userspace networking technology in order to enhance security. Our goal is to drop the use of all xen networking backend drivers within the kernel and have the guest talk directly to a userspace daemon, bypassing the vif (xen-netback) driver. Currently, in order to develop the networking layer, we utilize xen-netback and then use raw sockets to provide the userspace networking layer on the host.
## What are the future plans?
Edera is building a company to compete in the hypervisor space with open-source technology. More information to come soon on official channels.


@ -1,6 +1,6 @@
# krata
The Edera Hypervisor
An isolation engine for securing compute workloads.
![license](https://img.shields.io/github/license/edera-dev/krata)
![discord](https://img.shields.io/discord/1207447453083766814?label=discord)
@ -16,13 +16,13 @@ The Edera Hypervisor
## Introduction
krata is a single-host hypervisor service built for OCI-compliant containers. It isolates containers using a type-1 hypervisor, providing workload isolation that can exceed the security level of KVM-based OCI-compliant runtimes.
krata is a single-host workload isolation service. It isolates workloads using a type-1 hypervisor, providing a tight security boundary while preserving performance.
krata utilizes the core of the Xen hypervisor, with a fully memory-safe Rust control plane to bring Xen tooling into a new secure era.
krata utilizes the core of the Xen hypervisor with a fully memory-safe Rust control plane.
## Hardware Support
| Architecture | Completion Level | Virtualization Technology |
| ------------ | ---------------- | ------------------------- |
| x86_64 | 100% Completed | Intel VT-x, AMD-V |
| aarch64 | 30% Completed | AArch64 virtualization |
| Architecture | Completion Level | Hardware Virtualization |
| ------------ | ---------------- | ------------------------------- |
| x86_64 | 100% Completed | None, Intel VT-x, AMD-V |
| aarch64 | 10% Completed | AArch64 virtualization |

crates/build/Cargo.toml

@ -0,0 +1,25 @@
[package]
name = "krata-buildtools"
description = "Build tools for krata."
license.workspace = true
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
resolver = "2"
publish = false
[dependencies]
anyhow = { workspace = true }
env_logger = { workspace = true }
oci-spec = { workspace = true }
scopeguard = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
krata-oci = { path = "../oci", version = "^0.0.11" }
krata-tokio-tar = { workspace = true }
uuid = { workspace = true }
[[bin]]
name = "build-fetch-kernel"
path = "bin/fetch_kernel.rs"


@ -0,0 +1,121 @@
use std::{
env::{self, args},
path::PathBuf,
};
use anyhow::{anyhow, Result};
use env_logger::Env;
use krataoci::{
name::ImageName,
packer::{service::OciPackerService, OciPackedFormat},
progress::OciProgressContext,
registry::OciPlatform,
};
use oci_spec::image::{Arch, Os};
use tokio::{
fs::{self, File},
io::BufReader,
};
use tokio_stream::StreamExt;
use tokio_tar::Archive;
use uuid::Uuid;
#[tokio::main]
async fn main() -> Result<()> {
env_logger::Builder::from_env(Env::default().default_filter_or("warn")).init();
fs::create_dir_all("target/kernel").await?;
let arch = env::var("TARGET_ARCH").map_err(|_| anyhow!("missing TARGET_ARCH env var"))?;
println!("kernel architecture: {}", arch);
let platform = OciPlatform::new(
Os::Linux,
match arch.as_str() {
"x86_64" => Arch::Amd64,
"aarch64" => Arch::ARM64,
_ => {
return Err(anyhow!("unknown architecture '{}'", arch));
}
},
);
let image = ImageName::parse(&args().nth(1).unwrap())?;
let mut cache_dir = std::env::temp_dir().clone();
cache_dir.push(format!("krata-cache-{}", Uuid::new_v4()));
fs::create_dir_all(&cache_dir).await?;
let _delete_cache_dir = scopeguard::guard(cache_dir.clone(), |dir| {
let _ = std::fs::remove_dir_all(dir);
});
let (context, _) = OciProgressContext::create();
let service = OciPackerService::new(None, &cache_dir, platform).await?;
let packed = service
.request(image.clone(), OciPackedFormat::Tar, false, context)
.await?;
let annotations = packed
.manifest
.item()
.annotations()
.clone()
.unwrap_or_default();
let Some(format) = annotations.get("dev.edera.kernel.format") else {
return Err(anyhow!(
"image manifest missing 'dev.edera.kernel.format' annotation"
));
};
let Some(version) = annotations.get("dev.edera.kernel.version") else {
return Err(anyhow!(
"image manifest missing 'dev.edera.kernel.version' annotation"
));
};
let Some(flavor) = annotations.get("dev.edera.kernel.flavor") else {
return Err(anyhow!(
"image manifest missing 'dev.edera.kernel.flavor' annotation"
));
};
if format != "1" {
return Err(anyhow!("kernel format version '{}' is unknown", format));
}
let file = BufReader::new(File::open(packed.path).await?);
let mut archive = Archive::new(file);
let mut entries = archive.entries()?;
let kernel_image_tar_path = PathBuf::from("kernel/image");
let kernel_addons_tar_path = PathBuf::from("kernel/addons.squashfs");
let kernel_image_out_path = PathBuf::from(format!("target/kernel/kernel-{}", arch));
let kernel_addons_out_path = PathBuf::from(format!("target/kernel/addons-{}.squashfs", arch));
if kernel_image_out_path.exists() {
fs::remove_file(&kernel_image_out_path).await?;
}
if kernel_addons_out_path.exists() {
fs::remove_file(&kernel_addons_out_path).await?;
}
while let Some(entry) = entries.next().await {
let mut entry = entry?;
let path = entry.path()?.to_path_buf();
if !entry.header().entry_type().is_file() {
continue;
}
if path == kernel_image_tar_path {
entry.unpack(&kernel_image_out_path).await?;
} else if path == kernel_addons_tar_path {
entry.unpack(&kernel_addons_out_path).await?;
}
}
if !kernel_image_out_path.exists() {
return Err(anyhow!("image did not contain a file named /kernel/image"));
}
println!("kernel version: v{}", version);
println!("kernel flavor: {}", flavor);
Ok(())
}


@ -1,6 +1,6 @@
[package]
name = "krata-ctl"
description = "Command-line tool to control the krata hypervisor"
description = "Command-line tool to control the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
@ -11,6 +11,7 @@ resolver = "2"
[dependencies]
anyhow = { workspace = true }
async-stream = { workspace = true }
base64 = { workspace = true }
clap = { workspace = true }
comfy-table = { workspace = true }
crossterm = { workspace = true, features = ["event-stream"] }
@ -19,11 +20,12 @@ env_logger = { workspace = true }
fancy-duration = { workspace = true }
human_bytes = { workspace = true }
indicatif = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.11" }
log = { workspace = true }
prost-reflect = { workspace = true, features = ["serde"] }
prost-types = { workspace = true }
ratatui = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
termtree = { workspace = true }


@ -0,0 +1,70 @@
use std::collections::HashMap;
use anyhow::Result;
use clap::Parser;
use krata::v1::{
common::{GuestTaskSpec, GuestTaskSpecEnvVar},
control::{control_service_client::ControlServiceClient, ExecGuestRequest},
};
use tonic::{transport::Channel, Request};
use crate::console::StdioConsoleStream;
use super::resolve_guest;
#[derive(Parser)]
#[command(about = "Execute a command inside the guest")]
pub struct ExecCommand {
#[arg[short, long, help = "Environment variables"]]
env: Option<Vec<String>>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(help = "Guest to exec inside, either the name or the uuid")]
guest: String,
#[arg(
allow_hyphen_values = true,
trailing_var_arg = true,
help = "Command to run inside the guest"
)]
command: Vec<String>,
}
impl ExecCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
let initial = ExecGuestRequest {
guest_id,
task: Some(GuestTaskSpec {
environment: env_map(&self.env.unwrap_or_default())
.iter()
.map(|(key, value)| GuestTaskSpecEnvVar {
key: key.clone(),
value: value.clone(),
})
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
}),
data: vec![],
};
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
let response = client.exec_guest(Request::new(stream)).await?.into_inner();
let code = StdioConsoleStream::exec_output(response).await?;
std::process::exit(code);
}
}
fn env_map(env: &[String]) -> HashMap<String, String> {
let mut map = HashMap::<String, String>::new();
for item in env {
if let Some((key, value)) = item.split_once('=') {
map.insert(key.to_string(), value.to_string());
}
}
map
}


@ -0,0 +1,22 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::control::{control_service_client::ControlServiceClient, IdentifyHostRequest};
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(about = "Identify information about the host")]
pub struct IdentifyHostCommand {}
impl IdentifyHostCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.identify_host(Request::new(IdentifyHostRequest {}))
.await?
.into_inner();
println!("Host UUID: {}", response.host_uuid);
println!("Host Domain: {}", response.host_domid);
println!("Krata Version: {}", response.krata_version);
Ok(())
}
}


@ -1,14 +1,18 @@
use anyhow::Result;
use base64::Engine;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
idm::{internal, serialize::IdmSerializable, transport::IdmTransportPacketForm},
v1::control::{control_service_client::ControlServiceClient, SnoopIdmReply, SnoopIdmRequest},
};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio_stream::StreamExt;
use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, proto2kv};
use crate::format::{kv2line, proto2dynamic, value2kv};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum IdmSnoopFormat {
@ -34,19 +38,22 @@ impl IdmSnoopCommand {
while let Some(reply) = stream.next().await {
let reply = reply?;
let Some(line) = convert_idm_snoop(reply) else {
continue;
};
match self.format {
IdmSnoopFormat::Simple => {
self.print_simple(reply)?;
self.print_simple(line)?;
}
IdmSnoopFormat::Jsonl => {
let value = serde_json::to_value(proto2dynamic(reply)?)?;
let encoded = serde_json::to_string(&value)?;
let encoded = serde_json::to_string(&line)?;
println!("{}", encoded.trim());
}
IdmSnoopFormat::KeyValue => {
self.print_key_value(reply)?;
self.print_key_value(line)?;
}
}
}
@ -54,21 +61,97 @@ impl IdmSnoopCommand {
Ok(())
}
fn print_simple(&self, reply: SnoopIdmReply) -> Result<()> {
let from = reply.from;
let to = reply.to;
let Some(packet) = reply.packet else {
return Ok(());
fn print_simple(&self, line: IdmSnoopLine) -> Result<()> {
let encoded = if !line.packet.decoded.is_null() {
serde_json::to_string(&line.packet.decoded)?
} else {
base64::prelude::BASE64_STANDARD.encode(&line.packet.data)
};
let value = serde_json::to_value(proto2dynamic(packet)?)?;
let encoded = serde_json::to_string(&value)?;
println!("({} -> {}) {}", from, to, encoded);
println!(
"({} -> {}) {} {} {}",
line.from, line.to, line.packet.id, line.packet.form, encoded
);
Ok(())
}
fn print_key_value(&self, reply: SnoopIdmReply) -> Result<()> {
let kvs = proto2kv(reply)?;
fn print_key_value(&self, line: IdmSnoopLine) -> Result<()> {
let kvs = value2kv(serde_json::to_value(line)?)?;
println!("{}", kv2line(kvs));
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct IdmSnoopLine {
pub from: String,
pub to: String,
pub packet: IdmSnoopData,
}
#[derive(Serialize, Deserialize)]
pub struct IdmSnoopData {
pub id: u64,
pub channel: u64,
pub form: String,
pub data: String,
pub decoded: Value,
}
pub fn convert_idm_snoop(reply: SnoopIdmReply) -> Option<IdmSnoopLine> {
let packet = &(reply.packet?);
let decoded = if packet.channel == 0 {
match packet.form() {
IdmTransportPacketForm::Event => internal::Event::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok()),
IdmTransportPacketForm::Request
| IdmTransportPacketForm::StreamRequest
| IdmTransportPacketForm::StreamRequestUpdate => {
internal::Request::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok())
}
IdmTransportPacketForm::Response | IdmTransportPacketForm::StreamResponseUpdate => {
internal::Response::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok())
}
_ => None,
}
} else {
None
};
let decoded = decoded
.and_then(|message| serde_json::to_value(message).ok())
.unwrap_or(Value::Null);
let data = IdmSnoopData {
id: packet.id,
channel: packet.channel,
form: match packet.form() {
IdmTransportPacketForm::Raw => "raw".to_string(),
IdmTransportPacketForm::Event => "event".to_string(),
IdmTransportPacketForm::Request => "request".to_string(),
IdmTransportPacketForm::Response => "response".to_string(),
IdmTransportPacketForm::StreamRequest => "stream-request".to_string(),
IdmTransportPacketForm::StreamRequestUpdate => "stream-request-update".to_string(),
IdmTransportPacketForm::StreamRequestClosed => "stream-request-closed".to_string(),
IdmTransportPacketForm::StreamResponseUpdate => "stream-response-update".to_string(),
IdmTransportPacketForm::StreamResponseClosed => "stream-response-closed".to_string(),
_ => format!("unknown-{}", packet.form),
},
data: base64::prelude::BASE64_STANDARD.encode(&packet.data),
decoded,
};
Some(IdmSnoopLine {
from: reply.from,
to: reply.to,
packet: data,
})
}


@ -1,13 +1,13 @@
use std::collections::HashMap;
use anyhow::Result;
use clap::Parser;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
v1::{
common::{
guest_image_spec::Image, GuestImageSpec, GuestOciImageFormat, GuestOciImageSpec,
GuestSpec, GuestStatus, GuestTaskSpec, GuestTaskSpecEnvVar,
guest_image_spec::Image, GuestImageSpec, GuestOciImageSpec, GuestSpec, GuestSpecDevice,
GuestStatus, GuestTaskSpec, GuestTaskSpecEnvVar, OciImageFormat,
},
control::{
control_service_client::ControlServiceClient, watch_events_reply::Event,
@ -21,13 +21,19 @@ use tonic::{transport::Channel, Request};
use crate::{console::StdioConsoleStream, pull::pull_interactive_progress};
use super::pull::PullImageFormat;
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
pub enum LaunchImageFormat {
Squashfs,
Erofs,
}
#[derive(Parser)]
#[command(about = "Launch a new guest")]
pub struct LauchCommand {
#[arg(short = 'S', long, default_value = "squashfs", help = "Image format")]
image_format: PullImageFormat,
pub struct LaunchCommand {
#[arg(long, default_value = "squashfs", help = "Image format")]
image_format: LaunchImageFormat,
#[arg(long, help = "Overwrite image cache on pull")]
pull_overwrite_cache: bool,
#[arg(short, long, help = "Name of the guest")]
name: Option<String>,
#[arg(
@ -44,6 +50,8 @@ pub struct LauchCommand {
help = "Memory available to the guest, in megabytes"
)]
mem: u64,
#[arg[short = 'D', long = "device", help = "Devices to request for the guest"]]
device: Vec<String>,
#[arg[short, long, help = "Environment variables set in the guest"]]
env: Option<Vec<String>>,
#[arg(
@ -58,6 +66,12 @@ pub struct LauchCommand {
help = "Wait for the guest to start, implied by --attach"
)]
wait: bool,
#[arg(short = 'k', long, help = "OCI kernel image for guest to use")]
kernel: Option<String>,
#[arg(short = 'I', long, help = "OCI initrd image for guest to use")]
initrd: Option<String>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(help = "Container image for guest to use")]
oci: String,
#[arg(
@ -68,32 +82,47 @@ pub struct LauchCommand {
command: Vec<String>,
}
impl LauchCommand {
impl LaunchCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
let response = client
.pull_image(PullImageRequest {
image: self.oci.clone(),
format: match self.image_format {
PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
let image = self
.pull_image(
&mut client,
&self.oci,
match self.image_format {
LaunchImageFormat::Squashfs => OciImageFormat::Squashfs,
LaunchImageFormat::Erofs => OciImageFormat::Erofs,
},
})
)
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;
let kernel = if let Some(ref kernel) = self.kernel {
let kernel_image = self
.pull_image(&mut client, kernel, OciImageFormat::Tar)
.await?;
Some(kernel_image)
} else {
None
};
let initrd = if let Some(ref initrd) = self.initrd {
let kernel_image = self
.pull_image(&mut client, initrd, OciImageFormat::Tar)
.await?;
Some(kernel_image)
} else {
None
};
let request = CreateGuestRequest {
spec: Some(GuestSpec {
name: self.name.unwrap_or_default(),
image: Some(GuestImageSpec {
image: Some(Image::Oci(GuestOciImageSpec {
digest: reply.digest,
format: reply.format,
})),
}),
image: Some(image),
kernel,
initrd,
vcpus: self.cpus,
mem: self.mem,
task: Some(GuestTaskSpec {
@ -105,8 +134,14 @@ impl LauchCommand {
})
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
}),
annotations: vec![],
devices: self
.device
.iter()
.map(|name| GuestSpecDevice { name: name.clone() })
.collect(),
}),
};
let response = client
@ -139,6 +174,28 @@ impl LauchCommand {
StdioConsoleStream::restore_terminal_mode();
std::process::exit(code.unwrap_or(0));
}
async fn pull_image(
&self,
client: &mut ControlServiceClient<Channel>,
image: &str,
format: OciImageFormat,
) -> Result<GuestImageSpec> {
let response = client
.pull_image(PullImageRequest {
image: image.to_string(),
format: format.into(),
overwrite_cache: self.pull_overwrite_cache,
})
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;
Ok(GuestImageSpec {
image: Some(Image::Oci(GuestOciImageSpec {
digest: reply.digest,
format: reply.format,
})),
})
}
}
async fn wait_guest_started(id: &str, events: EventStream) -> Result<()> {


@ -28,7 +28,7 @@ enum ListFormat {
}
#[derive(Parser)]
#[command(about = "List the guests on the hypervisor")]
#[command(about = "List the guests on the isolation engine")]
pub struct ListCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: ListFormat,


@ -0,0 +1,128 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
use krata::{
events::EventStream,
v1::control::{control_service_client::ControlServiceClient, DeviceInfo, ListDevicesRequest},
};
use serde_json::Value;
use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, proto2kv};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum ListDevicesFormat {
Table,
Json,
JsonPretty,
Jsonl,
Yaml,
KeyValue,
Simple,
}
#[derive(Parser)]
#[command(about = "List the devices on the isolation engine")]
pub struct ListDevicesCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: ListDevicesFormat,
}
impl ListDevicesCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
_events: EventStream,
) -> Result<()> {
let reply = client
.list_devices(ListDevicesRequest {})
.await?
.into_inner();
let mut devices = reply.devices;
devices.sort_by(|a, b| a.name.cmp(&b.name));
match self.format {
ListDevicesFormat::Table => {
self.print_devices_table(devices)?;
}
ListDevicesFormat::Simple => {
for device in devices {
println!("{}\t{}\t{}", device.name, device.claimed, device.owner);
}
}
ListDevicesFormat::Json | ListDevicesFormat::JsonPretty | ListDevicesFormat::Yaml => {
let mut values = Vec::new();
for device in devices {
let message = proto2dynamic(device)?;
values.push(serde_json::to_value(message)?);
}
let value = Value::Array(values);
let encoded = if self.format == ListDevicesFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == ListDevicesFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
ListDevicesFormat::Jsonl => {
for device in devices {
let message = proto2dynamic(device)?;
println!("{}", serde_json::to_string(&message)?);
}
}
ListDevicesFormat::KeyValue => {
self.print_key_value(devices)?;
}
}
Ok(())
}
fn print_devices_table(&self, devices: Vec<DeviceInfo>) -> Result<()> {
let mut table = Table::new();
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
table.set_header(vec!["name", "status", "owner"]);
for device in devices {
let status_text = if device.claimed {
"claimed"
} else {
"available"
};
let status_color = if device.claimed {
Color::Blue
} else {
Color::Green
};
table.add_row(vec![
Cell::new(device.name),
Cell::new(status_text).fg(status_color),
Cell::new(device.owner),
]);
}
if table.is_empty() {
println!("no devices configured");
} else {
println!("{}", table);
}
Ok(())
}
fn print_key_value(&self, devices: Vec<DeviceInfo>) -> Result<()> {
for device in devices {
let kvs = proto2kv(device)?;
println!("{}", kv2line(kvs));
}
Ok(())
}
}


@ -1,8 +1,11 @@
pub mod attach;
pub mod destroy;
pub mod exec;
pub mod identify_host;
pub mod idm_snoop;
pub mod launch;
pub mod list;
pub mod list_devices;
pub mod logs;
pub mod metrics;
pub mod pull;
@ -20,21 +23,20 @@ use krata::{
use tonic::{transport::Channel, Request};
use self::{
attach::AttachCommand, destroy::DestroyCommand, idm_snoop::IdmSnoopCommand,
launch::LauchCommand, list::ListCommand, logs::LogsCommand, metrics::MetricsCommand,
pull::PullCommand, resolve::ResolveCommand, top::TopCommand, watch::WatchCommand,
attach::AttachCommand, destroy::DestroyCommand, exec::ExecCommand,
identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand, launch::LaunchCommand,
list::ListCommand, list_devices::ListDevicesCommand, logs::LogsCommand,
metrics::MetricsCommand, pull::PullCommand, resolve::ResolveCommand, top::TopCommand,
watch::WatchCommand,
};
#[derive(Parser)]
#[command(
version,
about = "Control the krata hypervisor, a secure platform for running containers"
)]
#[command(version, about = "Control the krata isolation engine")]
pub struct ControlCommand {
#[arg(
short,
long,
help = "The connection URL to the krata hypervisor",
help = "The connection URL to the krata isolation engine",
default_value = "unix:///var/lib/krata/daemon.socket"
)]
connection: String,
@ -45,9 +47,10 @@ pub struct ControlCommand {
#[derive(Subcommand)]
pub enum Commands {
Launch(LauchCommand),
Launch(LaunchCommand),
Destroy(DestroyCommand),
List(ListCommand),
ListDevices(ListDevicesCommand),
Attach(AttachCommand),
Pull(PullCommand),
Logs(LogsCommand),
@ -56,6 +59,8 @@ pub enum Commands {
Metrics(MetricsCommand),
IdmSnoop(IdmSnoopCommand),
Top(TopCommand),
IdentifyHost(IdentifyHostCommand),
Exec(ExecCommand),
}
impl ControlCommand {
@ -107,6 +112,18 @@ impl ControlCommand {
Commands::Pull(pull) => {
pull.run(client).await?;
}
Commands::IdentifyHost(identify) => {
identify.run(client).await?;
}
Commands::Exec(exec) => {
exec.run(client).await?;
}
Commands::ListDevices(list) => {
list.run(client, events).await?;
}
}
Ok(())
}


@ -1,7 +1,7 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::v1::{
common::GuestOciImageFormat,
common::OciImageFormat,
control::{control_service_client::ControlServiceClient, PullImageRequest},
};
@ -13,6 +13,7 @@ use crate::pull::pull_interactive_progress;
pub enum PullImageFormat {
Squashfs,
Erofs,
Tar,
}
#[derive(Parser)]
@ -22,6 +23,8 @@ pub struct PullCommand {
image: String,
#[arg(short = 's', long, default_value = "squashfs", help = "Image format")]
image_format: PullImageFormat,
#[arg(short = 'o', long, help = "Overwrite image cache")]
overwrite_cache: bool,
}
impl PullCommand {
@ -30,9 +33,11 @@ impl PullCommand {
.pull_image(PullImageRequest {
image: self.image.clone(),
format: match self.image_format {
PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
PullImageFormat::Squashfs => OciImageFormat::Squashfs.into(),
PullImageFormat::Erofs => OciImageFormat::Erofs.into(),
PullImageFormat::Tar => OciImageFormat::Tar.into(),
},
overwrite_cache: self.overwrite_cache,
})
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;


@ -138,7 +138,7 @@ impl TopApp {
impl Widget for &mut TopApp {
fn render(self, area: Rect, buf: &mut Buffer) {
let title = Title::from(" krata hypervisor ".bold());
let title = Title::from(" krata isolation engine ".bold());
let instructions = Title::from(vec![" Quit ".into(), "<Q> ".blue().bold()]);
let block = Block::default()
.title(title.alignment(Alignment::Center))


@ -1,4 +1,4 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use async_stream::stream;
use crossterm::{
terminal::{disable_raw_mode, enable_raw_mode, is_raw_mode_enabled},
@ -8,12 +8,15 @@ use krata::{
events::EventStream,
v1::{
common::GuestStatus,
control::{watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest},
control::{
watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest, ExecGuestReply,
ExecGuestRequest,
},
},
};
use log::debug;
use tokio::{
io::{stdin, stdout, AsyncReadExt, AsyncWriteExt},
io::{stderr, stdin, stdout, AsyncReadExt, AsyncWriteExt},
task::JoinHandle,
};
use tokio_stream::{Stream, StreamExt};
@ -45,6 +48,31 @@ impl StdioConsoleStream {
}
}
pub async fn stdin_stream_exec(
initial: ExecGuestRequest,
) -> impl Stream<Item = ExecGuestRequest> {
let mut stdin = stdin();
stream! {
yield initial;
let mut buffer = vec![0u8; 60];
loop {
let size = match stdin.read(&mut buffer).await {
Ok(size) => size,
Err(error) => {
debug!("failed to read stdin: {}", error);
break;
}
};
let data = buffer[0..size].to_vec();
if size == 1 && buffer[0] == 0x1d {
break;
}
yield ExecGuestRequest { guest_id: String::default(), task: None, data };
}
}
}
pub async fn stdout(mut stream: Streaming<ConsoleDataReply>) -> Result<()> {
if stdin().is_tty() {
enable_raw_mode()?;
@ -62,6 +90,32 @@ impl StdioConsoleStream {
Ok(())
}
pub async fn exec_output(mut stream: Streaming<ExecGuestReply>) -> Result<i32> {
let mut stdout = stdout();
let mut stderr = stderr();
while let Some(reply) = stream.next().await {
let reply = reply?;
if !reply.stdout.is_empty() {
stdout.write_all(&reply.stdout).await?;
stdout.flush().await?;
}
if !reply.stderr.is_empty() {
stderr.write_all(&reply.stderr).await?;
stderr.flush().await?;
}
if reply.exited {
if reply.error.is_empty() {
return Ok(reply.exit_code);
} else {
return Err(anyhow!("exec failed: {}", reply.error));
}
}
}
Ok(-1)
}
pub async fn guest_exit_hook(
id: String,
events: EventStream,


@ -4,7 +4,7 @@ use anyhow::Result;
use fancy_duration::FancyDuration;
use human_bytes::human_bytes;
use krata::v1::common::{Guest, GuestMetricFormat, GuestMetricNode, GuestStatus};
use prost_reflect::{DynamicMessage, FieldDescriptor, ReflectMessage, Value as ReflectValue};
use prost_reflect::{DynamicMessage, ReflectMessage};
use prost_types::Value;
use termtree::Tree;
@ -15,64 +15,59 @@ pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
)?)
}
pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
let message = proto2dynamic(proto)?;
pub fn value2kv(value: serde_json::Value) -> Result<HashMap<String, String>> {
let mut map = HashMap::new();
fn crawl(prefix: String, map: &mut HashMap<String, String>, value: serde_json::Value) {
fn dot(prefix: &str, next: String) -> String {
if prefix.is_empty() {
next.to_string()
} else {
format!("{}.{}", prefix, next)
}
}
fn crawl(
prefix: String,
field: Option<&FieldDescriptor>,
map: &mut HashMap<String, String>,
value: &ReflectValue,
) {
match value {
ReflectValue::Message(child) => {
for (field, field_value) in child.fields() {
let path = if prefix.is_empty() {
field.json_name().to_string()
} else {
format!("{}.{}", prefix, field.json_name())
};
crawl(path, Some(&field), map, field_value);
serde_json::Value::Null => {
map.insert(prefix, "null".to_string());
}
serde_json::Value::String(value) => {
map.insert(prefix, value);
}
serde_json::Value::Bool(value) => {
map.insert(prefix, value.to_string());
}
serde_json::Value::Number(value) => {
map.insert(prefix, value.to_string());
}
serde_json::Value::Array(value) => {
for (i, item) in value.into_iter().enumerate() {
let next = dot(&prefix, i.to_string());
crawl(next, map, item);
}
}
ReflectValue::EnumNumber(number) => {
if let Some(kind) = field.map(|x| x.kind()) {
if let Some(e) = kind.as_enum() {
if let Some(value) = e.get_value(*number) {
map.insert(prefix, value.name().to_string());
}
}
serde_json::Value::Object(value) => {
for (key, item) in value {
let next = dot(&prefix, key);
crawl(next, map, item);
}
}
ReflectValue::String(value) => {
map.insert(prefix.to_string(), value.clone());
}
ReflectValue::List(value) => {
for (x, value) in value.iter().enumerate() {
crawl(format!("{}.{}", prefix, x), field, map, value);
}
}
_ => {
map.insert(prefix.to_string(), value.to_string());
}
}
}
crawl(
"".to_string(),
None,
&mut map,
&ReflectValue::Message(message),
);
crawl("".to_string(), &mut map, value);
Ok(map)
}
pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
let message = proto2dynamic(proto)?;
let value = serde_json::to_value(message)?;
value2kv(value)
}
pub fn kv2line(map: HashMap<String, String>) -> String {
map.iter()
.map(|(k, v)| format!("{}=\"{}\"", k, v.replace('"', "\\\"")))


@ -1,20 +1,205 @@
use std::collections::HashMap;
use std::{
collections::{hash_map::Entry, HashMap},
time::Duration,
};
use anyhow::{anyhow, Result};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use krata::v1::control::{PullImageProgressLayerPhase, PullImageProgressPhase, PullImageReply};
use krata::v1::control::{
image_progress_indication::Indication, ImageProgressIndication, ImageProgressLayerPhase,
ImageProgressPhase, PullImageReply,
};
use tokio_stream::StreamExt;
use tonic::Streaming;
const SPINNER_STRINGS: &[&str] = &[
"[= ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ =]",
"[====================]",
];
fn progress_bar_for_indication(indication: &ImageProgressIndication) -> Option<ProgressBar> {
match indication.indication.as_ref() {
Some(Indication::Hidden(_)) | None => None,
Some(Indication::Bar(indic)) => {
let bar = ProgressBar::new(indic.total);
bar.enable_steady_tick(Duration::from_millis(100));
Some(bar)
}
Some(Indication::Spinner(_)) => {
let bar = ProgressBar::new_spinner();
bar.enable_steady_tick(Duration::from_millis(100));
Some(bar)
}
Some(Indication::Completed(indic)) => {
let bar = ProgressBar::new_spinner();
bar.enable_steady_tick(Duration::from_millis(100));
if !indic.message.is_empty() {
bar.finish_with_message(indic.message.clone());
} else {
bar.finish()
}
Some(bar)
}
}
}
fn configure_for_indication(
bar: &mut ProgressBar,
multi_progress: &mut MultiProgress,
indication: &ImageProgressIndication,
top_phase: Option<ImageProgressPhase>,
layer_phase: Option<ImageProgressLayerPhase>,
layer_id: Option<&str>,
) {
let prefix = if let Some(phase) = top_phase {
match phase {
ImageProgressPhase::Unknown => "unknown",
ImageProgressPhase::Started => "started",
ImageProgressPhase::Resolving => "resolving",
ImageProgressPhase::Resolved => "resolved",
ImageProgressPhase::ConfigDownload => "downloading",
ImageProgressPhase::LayerDownload => "downloading",
ImageProgressPhase::Assemble => "assembling",
ImageProgressPhase::Pack => "packing",
ImageProgressPhase::Complete => "complete",
}
} else if let Some(phase) = layer_phase {
match phase {
ImageProgressLayerPhase::Unknown => "unknown",
ImageProgressLayerPhase::Waiting => "waiting",
ImageProgressLayerPhase::Downloading => "downloading",
ImageProgressLayerPhase::Downloaded => "downloaded",
ImageProgressLayerPhase::Extracting => "extracting",
ImageProgressLayerPhase::Extracted => "extracted",
}
} else {
""
};
let prefix = prefix.to_string();
let id = if let Some(layer_id) = layer_id {
let hash = if let Some((_, hash)) = layer_id.split_once(':') {
hash
} else {
"unknown"
};
let small_hash = if hash.len() > 10 { &hash[0..10] } else { hash };
Some(format!("{:width$}", small_hash, width = 10))
} else {
None
};
let prefix = if let Some(id) = id {
format!("{} {:width$}", id, prefix, width = 11)
} else {
format!(" {:width$}", prefix, width = 11)
};
match indication.indication.as_ref() {
Some(Indication::Hidden(_)) | None => {
multi_progress.remove(bar);
return;
}
Some(Indication::Bar(indic)) => {
if indic.is_bytes {
bar.set_style(ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) eta: {eta}").unwrap().progress_chars("=>-"));
} else {
bar.set_style(
ProgressStyle::with_template(
"{prefix} [{bar:20} {msg} {human_pos}/{human_len} ({per_sec}/sec)",
)
.unwrap()
.progress_chars("=>-"),
);
}
bar.set_message(indic.message.clone());
bar.set_position(indic.current);
bar.set_length(indic.total);
}
Some(Indication::Spinner(indic)) => {
bar.set_style(
ProgressStyle::with_template("{prefix} {spinner} {msg}")
.unwrap()
.tick_strings(SPINNER_STRINGS),
);
bar.set_message(indic.message.clone());
}
Some(Indication::Completed(indic)) => {
if bar.is_finished() {
return;
}
bar.disable_steady_tick();
bar.set_message(indic.message.clone());
if indic.total != 0 {
bar.set_position(indic.total);
bar.set_length(indic.total);
}
if bar.style().get_tick_str(0).contains('=') {
bar.set_style(
ProgressStyle::with_template("{prefix} {spinner} {msg}")
.unwrap()
.tick_strings(SPINNER_STRINGS),
);
bar.finish_with_message(indic.message.clone());
} else if indic.is_bytes {
bar.set_style(
ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_total_bytes}")
.unwrap()
.progress_chars("=>-"),
);
} else {
bar.set_style(
ProgressStyle::with_template("{prefix} [{bar:20}] {msg}")
.unwrap()
.progress_chars("=>-"),
);
}
bar.tick();
bar.enable_steady_tick(Duration::from_millis(100));
}
};
bar.set_prefix(prefix);
bar.tick();
}
pub async fn pull_interactive_progress(
mut stream: Streaming<PullImageReply>,
) -> Result<PullImageReply> {
let mut multi_progress: Option<(MultiProgress, HashMap<String, ProgressBar>)> = None;
let mut multi_progress = MultiProgress::new();
multi_progress.set_move_cursor(false);
let mut progresses = HashMap::new();
while let Some(reply) = stream.next().await {
let reply = reply?;
let reply = match reply {
Ok(reply) => reply,
Err(error) => {
multi_progress.clear()?;
return Err(error.into());
}
};
if reply.progress.is_none() && !reply.digest.is_empty() {
multi_progress.clear()?;
return Ok(reply);
}
@ -22,97 +207,62 @@ pub async fn pull_interactive_progress(
continue;
};
if multi_progress.is_none() {
multi_progress = Some((MultiProgress::new(), HashMap::new()));
for layer in &oci.layers {
let Some(ref indication) = layer.indication else {
continue;
};
let bar = match progresses.entry(layer.id.clone()) {
Entry::Occupied(entry) => Some(entry.into_mut()),
Entry::Vacant(entry) => {
if let Some(bar) = progress_bar_for_indication(indication) {
multi_progress.add(bar.clone());
Some(entry.insert(bar))
} else {
None
}
}
};
if let Some(bar) = bar {
configure_for_indication(
bar,
&mut multi_progress,
indication,
None,
Some(layer.phase()),
Some(&layer.id),
);
}
}
let Some((multi_progress, progresses)) = multi_progress.as_mut() else {
continue;
};
if let Some(ref indication) = oci.indication {
let bar = match progresses.entry("root".to_string()) {
Entry::Occupied(entry) => Some(entry.into_mut()),
match oci.phase() {
PullImageProgressPhase::Resolved
| PullImageProgressPhase::ConfigAcquire
| PullImageProgressPhase::LayerAcquire => {
if progresses.is_empty() && !oci.layers.is_empty() {
for layer in &oci.layers {
let bar = ProgressBar::new(layer.total);
bar.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
progresses.insert(layer.id.clone(), bar.clone());
multi_progress.add(bar);
}
}
for layer in oci.layers {
let Some(progress) = progresses.get_mut(&layer.id) else {
continue;
};
let phase = match layer.phase() {
PullImageProgressLayerPhase::Waiting => "waiting",
PullImageProgressLayerPhase::Downloading => "downloading",
PullImageProgressLayerPhase::Downloaded => "downloaded",
PullImageProgressLayerPhase::Extracting => "extracting",
PullImageProgressLayerPhase::Extracted => "extracted",
_ => "unknown",
};
let simple = if let Some((_, hash)) = layer.id.split_once(':') {
hash
Entry::Vacant(entry) => {
if let Some(bar) = progress_bar_for_indication(indication) {
multi_progress.add(bar.clone());
Some(entry.insert(bar))
} else {
"unknown"
};
let simple = if simple.len() > 10 {
&simple[0..10]
} else {
simple
};
let message = format!(
"{:width$} {:phwidth$}",
simple,
phase,
width = 10,
phwidth = 11
);
if message != progress.message() {
progress.set_message(message);
None
}
progress.update(|state| {
state.set_len(layer.total);
state.set_pos(layer.value);
});
}
};
if let Some(bar) = bar {
configure_for_indication(
bar,
&mut multi_progress,
indication,
Some(oci.phase()),
None,
None,
);
}
PullImageProgressPhase::Packing => {
for (key, bar) in &mut *progresses {
if key == "packing" {
continue;
}
bar.finish_and_clear();
multi_progress.remove(bar);
}
progresses.retain(|k, _| k == "packing");
if progresses.is_empty() {
let progress = ProgressBar::new(100);
progress.set_message("packing ");
progress.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
progresses.insert("packing".to_string(), progress);
}
let Some(progress) = progresses.get("packing") else {
continue;
};
progress.update(|state| {
state.set_len(oci.total);
state.set_pos(oci.value);
});
}
_ => {}
}
}
multi_progress.clear()?;
Err(anyhow!("never received final reply for image pull"))
}


@ -1,6 +1,6 @@
[package]
name = "krata-daemon"
description = "Daemon for the krata hypervisor."
description = "Daemon for the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
@ -17,15 +17,19 @@ circular-buffer = { workspace = true }
clap = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata-oci = { path = "../oci", version = "^0.0.9" }
krata-runtime = { path = "../runtime", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.11" }
krata-oci = { path = "../oci", version = "^0.0.11" }
krata-runtime = { path = "../runtime", version = "^0.0.11" }
log = { workspace = true }
prost = { workspace = true }
redb = { workspace = true }
scopeguard = { workspace = true }
serde = { workspace = true }
signal-hook = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
toml = { workspace = true }
krata-tokio-tar = { workspace = true }
tonic = { workspace = true, features = ["tls"] }
uuid = { workspace = true }


@ -1,21 +1,9 @@
use anyhow::Result;
use clap::Parser;
use env_logger::Env;
use krata::dial::ControlDialAddress;
use kratad::Daemon;
use kratad::command::DaemonCommand;
use log::LevelFilter;
use std::{
str::FromStr,
sync::{atomic::AtomicBool, Arc},
};
#[derive(Parser)]
struct DaemonCommand {
#[arg(short, long, default_value = "unix:///var/lib/krata/daemon.socket")]
listen: String,
#[arg(short, long, default_value = "/var/lib/krata")]
store: String,
}
use std::sync::{atomic::AtomicBool, Arc};
#[tokio::main(flavor = "multi_thread", worker_threads = 10)]
async fn main() -> Result<()> {
@ -24,12 +12,8 @@ async fn main() -> Result<()> {
.init();
mask_sighup()?;
let args = DaemonCommand::parse();
let addr = ControlDialAddress::from_str(&args.listen)?;
let mut daemon = Daemon::new(args.store.clone()).await?;
daemon.listen(addr).await?;
Ok(())
let command = DaemonCommand::parse();
command.run().await
}
fn mask_sighup() -> Result<()> {


@ -0,0 +1,36 @@
use anyhow::Result;
use clap::{CommandFactory, Parser};
use krata::dial::ControlDialAddress;
use std::str::FromStr;
use crate::Daemon;
#[derive(Parser)]
#[command(version, about = "krata isolation engine daemon")]
pub struct DaemonCommand {
#[arg(
short,
long,
default_value = "unix:///var/lib/krata/daemon.socket",
help = "Listen address"
)]
listen: String,
#[arg(short, long, default_value = "/var/lib/krata", help = "Storage path")]
store: String,
}
impl DaemonCommand {
pub async fn run(self) -> Result<()> {
let addr = ControlDialAddress::from_str(&self.listen)?;
let mut daemon = Daemon::new(self.store.clone()).await?;
daemon.listen(addr).await?;
Ok(())
}
pub fn version() -> String {
DaemonCommand::command()
.get_version()
.unwrap_or("unknown")
.to_string()
}
}


@ -0,0 +1,55 @@
use std::{collections::HashMap, path::Path};
use anyhow::Result;
use serde::{Deserialize, Serialize};
use tokio::fs;
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonConfig {
#[serde(default)]
pub pci: DaemonPciConfig,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonPciConfig {
#[serde(default)]
pub devices: HashMap<String, DaemonPciDeviceConfig>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct DaemonPciDeviceConfig {
pub locations: Vec<String>,
#[serde(default)]
pub permissive: bool,
#[serde(default)]
#[serde(rename = "msi-translate")]
pub msi_translate: bool,
#[serde(default)]
#[serde(rename = "power-management")]
pub power_management: bool,
#[serde(default)]
#[serde(rename = "rdm-reserve-policy")]
pub rdm_reserve_policy: DaemonPciDeviceRdmReservePolicy,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub enum DaemonPciDeviceRdmReservePolicy {
#[default]
#[serde(rename = "strict")]
Strict,
#[serde(rename = "relaxed")]
Relaxed,
}
impl DaemonConfig {
pub async fn load(path: &Path) -> Result<DaemonConfig> {
if path.exists() {
let content = fs::read_to_string(path).await?;
let config: DaemonConfig = toml::from_str(&content)?;
Ok(config)
} else {
fs::write(&path, "").await?;
Ok(DaemonConfig::default())
}
}
}
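
For reference, the serde attributes above imply a daemon config.toml shaped roughly like the snippet below. This is an illustrative sketch, not part of the diff: the device name "gpu0" and the PCI location are invented, and the module path assumes the kratad lib.rs exports shown later in this section.

// Hypothetical round-trip of the PCI device config layout defined above.
// The device name "gpu0" and the BDF location are invented for illustration.
use kratad::config::DaemonConfig;

fn config_example() -> anyhow::Result<()> {
    let text = r#"
[pci.devices.gpu0]
locations = ["0000:01:00.0"]
permissive = true
msi-translate = true
power-management = false
rdm-reserve-policy = "relaxed"
"#;
    let config: DaemonConfig = toml::from_str(text)?;
    assert!(config.pci.devices.contains_key("gpu0"));
    assert!(config.pci.devices["gpu0"].msi_translate);
    Ok(())
}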


@ -1,6 +1,6 @@
use std::{collections::HashMap, sync::Arc};
use anyhow::Result;
use anyhow::{anyhow, Result};
use circular_buffer::CircularBuffer;
use kratart::channel::ChannelService;
use log::error;
@ -11,6 +11,9 @@ use tokio::{
},
task::JoinHandle,
};
use uuid::Uuid;
use crate::glt::GuestLookupTable;
const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
@ -21,6 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
#[derive(Clone)]
pub struct DaemonConsoleHandle {
glt: GuestLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
sender: Sender<(u32, Vec<u8>)>,
@ -50,9 +54,12 @@ impl DaemonConsoleAttachHandle {
impl DaemonConsoleHandle {
pub async fn attach(
&self,
domid: u32,
uuid: Uuid,
sender: Sender<Vec<u8>>,
) -> Result<DaemonConsoleAttachHandle> {
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
let buffers = self.buffers.lock().await;
let buffer = buffers.get(&domid).map(|x| x.to_vec()).unwrap_or_default();
drop(buffers);
@ -77,6 +84,7 @@ impl Drop for DaemonConsoleHandle {
}
pub struct DaemonConsole {
glt: GuestLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
receiver: Receiver<(u32, Option<Vec<u8>>)>,
@ -85,13 +93,14 @@ pub struct DaemonConsole {
}
impl DaemonConsole {
pub async fn new() -> Result<DaemonConsole> {
pub async fn new(glt: GuestLookupTable) -> Result<DaemonConsole> {
let (service, sender, receiver) =
ChannelService::new("krata-console".to_string(), Some(0)).await?;
let task = service.launch().await?;
let listeners = Arc::new(Mutex::new(HashMap::new()));
let buffers = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonConsole {
glt,
listeners,
buffers,
receiver,
@ -101,6 +110,7 @@ impl DaemonConsole {
}
pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
let glt = self.glt.clone();
let listeners = self.listeners.clone();
let buffers = self.buffers.clone();
let sender = self.sender.clone();
@ -110,6 +120,7 @@ impl DaemonConsole {
}
});
Ok(DaemonConsoleHandle {
glt,
listeners,
buffers,
sender,


@ -1,24 +1,27 @@
use async_stream::try_stream;
use futures::Stream;
use krata::{
idm::protocol::{
idm_request::Request as IdmRequestType, idm_response::Response as IdmResponseType,
IdmMetricsRequest,
idm::internal::{
exec_stream_request_update::Update, request::Request as IdmRequestType,
response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
},
v1::{
common::{Guest, GuestOciImageFormat, GuestState, GuestStatus},
common::{Guest, GuestState, GuestStatus, OciImageFormat},
control::{
control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
DeviceInfo, ExecGuestReply, ExecGuestRequest, IdentifyHostReply, IdentifyHostRequest,
ListDevicesReply, ListDevicesRequest, ListGuestsReply, ListGuestsRequest,
PullImageReply, PullImageRequest, ReadGuestMetricsReply, ReadGuestMetricsRequest,
ResolveGuestReply, ResolveGuestRequest, SnoopIdmReply, SnoopIdmRequest,
WatchEventsReply, WatchEventsRequest,
},
},
};
use krataoci::{
name::ImageName,
packer::{service::OciPackerService, OciImagePacked, OciPackedFormat},
packer::{service::OciPackerService, OciPackedFormat, OciPackedImage},
progress::{OciProgress, OciProgressContext},
};
use std::{pin::Pin, str::FromStr};
@ -32,8 +35,9 @@ use tonic::{Request, Response, Status, Streaming};
use uuid::Uuid;
use crate::{
console::DaemonConsoleHandle, db::GuestStore, event::DaemonEventContext, idm::DaemonIdmHandle,
metrics::idm_metric_to_api, oci::convert_oci_progress,
command::DaemonCommand, console::DaemonConsoleHandle, db::GuestStore,
devices::DaemonDeviceManager, event::DaemonEventContext, glt::GuestLookupTable,
idm::DaemonIdmHandle, metrics::idm_metric_to_api, oci::convert_oci_progress,
};
pub struct ApiError {
@ -56,6 +60,8 @@ impl From<ApiError> for Status {
#[derive(Clone)]
pub struct DaemonControlService {
glt: GuestLookupTable,
devices: DaemonDeviceManager,
events: DaemonEventContext,
console: DaemonConsoleHandle,
idm: DaemonIdmHandle,
@ -65,7 +71,10 @@ pub struct DaemonControlService {
}
impl DaemonControlService {
#[allow(clippy::too_many_arguments)]
pub fn new(
glt: GuestLookupTable,
devices: DaemonDeviceManager,
events: DaemonEventContext,
console: DaemonConsoleHandle,
idm: DaemonIdmHandle,
@ -74,6 +83,8 @@ impl DaemonControlService {
packer: OciPackerService,
) -> Self {
Self {
glt,
devices,
events,
console,
idm,
@ -90,12 +101,15 @@ enum ConsoleDataSelect {
}
enum PullImageSelect {
Progress(usize),
Completed(Result<Result<OciImagePacked, anyhow::Error>, JoinError>),
Progress(Option<OciProgress>),
Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
}
#[tonic::async_trait]
impl ControlService for DaemonControlService {
type ExecGuestStream =
Pin<Box<dyn Stream<Item = Result<ExecGuestReply, Status>> + Send + 'static>>;
type ConsoleDataStream =
Pin<Box<dyn Stream<Item = Result<ConsoleDataReply, Status>> + Send + 'static>>;
@ -108,6 +122,18 @@ impl ControlService for DaemonControlService {
type SnoopIdmStream =
Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;
async fn identify_host(
&self,
request: Request<IdentifyHostRequest>,
) -> Result<Response<IdentifyHostReply>, Status> {
let _ = request.into_inner();
Ok(Response::new(IdentifyHostReply {
host_domid: self.glt.host_domid(),
host_uuid: self.glt.host_uuid().to_string(),
krata_version: DaemonCommand::version(),
}))
}
async fn create_guest(
&self,
request: Request<CreateGuestRequest>,
@ -130,6 +156,7 @@ impl ControlService for DaemonControlService {
network: None,
exit_info: None,
error_info: None,
host: self.glt.host_uuid().to_string(),
domid: u32::MAX,
}),
spec: Some(spec),
@ -148,6 +175,98 @@ impl ControlService for DaemonControlService {
}))
}
async fn exec_guest(
&self,
request: Request<Streaming<ExecGuestRequest>>,
) -> Result<Response<Self::ExecGuestStream>, Status> {
let mut input = request.into_inner();
let Some(request) = input.next().await else {
return Err(ApiError {
message: "expected to have at least one request".to_string(),
}
.into());
};
let request = request?;
let Some(task) = request.task else {
return Err(ApiError {
message: "task is missing".to_string(),
}
.into());
};
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
message: error.to_string(),
})?;
let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
message: error.to_string(),
})?;
let idm_request = IdmRequest {
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
update: Some(Update::Start(ExecStreamRequestStart {
environment: task
.environment
.into_iter()
.map(|x| ExecEnvVar {
key: x.key,
value: x.value,
})
.collect(),
command: task.command,
working_directory: task.working_directory,
})),
})),
};
let output = try_stream! {
let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
message: x.to_string(),
})?;
loop {
select! {
x = input.next() => if let Some(update) = x {
let update: Result<ExecGuestRequest, Status> = update.map_err(|error| ApiError {
message: error.to_string()
}.into());
if let Ok(update) = update {
if !update.data.is_empty() {
let _ = handle.update(IdmRequest {
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
update: Some(Update::Stdin(ExecStreamRequestStdin {
data: update.data,
})),
}))}).await;
}
}
},
x = handle.receiver.recv() => match x {
Some(response) => {
let Some(IdmResponseType::ExecStream(update)) = response.response else {
break;
};
let reply = ExecGuestReply {
exited: update.exited,
error: update.error,
exit_code: update.exit_code,
stdout: update.stdout,
stderr: update.stderr
};
yield reply;
},
None => {
break;
}
}
};
}
};
Ok(Response::new(Box::pin(output) as Self::ExecGuestStream))
}
async fn destroy_guest(
&self,
request: Request<DestroyGuestRequest>,
@ -230,36 +349,10 @@ impl ControlService for DaemonControlService {
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
message: error.to_string(),
})?;
let guest = self
.guests
.read(uuid)
.await
.map_err(|error| ApiError {
message: error.to_string(),
})?
.ok_or_else(|| ApiError {
message: "guest did not exist in the database".to_string(),
})?;
let Some(ref state) = guest.state else {
return Err(ApiError {
message: "guest did not have state".to_string(),
}
.into());
};
let domid = state.domid;
if domid == 0 {
return Err(ApiError {
message: "invalid domid on the guest".to_string(),
}
.into());
}
let (sender, mut receiver) = channel(100);
let console = self
.console
.attach(domid, sender)
.attach(uuid, sender)
.await
.map_err(|error| ApiError {
message: format!("failed to attach to console: {}", error),
@ -309,45 +402,21 @@ impl ControlService for DaemonControlService {
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
message: error.to_string(),
})?;
let guest = self
.guests
.read(uuid)
.await
.map_err(|error| ApiError {
message: error.to_string(),
})?
.ok_or_else(|| ApiError {
message: "guest did not exist in the database".to_string(),
})?;
let Some(ref state) = guest.state else {
return Err(ApiError {
message: "guest did not have state".to_string(),
}
.into());
};
let domid = state.domid;
if domid == 0 {
return Err(ApiError {
message: "invalid domid on the guest".to_string(),
}
.into());
}
let client = self.idm.client(domid).await.map_err(|error| ApiError {
let client = self.idm.client(uuid).await.map_err(|error| ApiError {
message: error.to_string(),
})?;
let response = client
.send(IdmRequestType::Metrics(IdmMetricsRequest {}))
.send(IdmRequest {
request: Some(IdmRequestType::Metrics(MetricsRequest {})),
})
.await
.map_err(|error| ApiError {
message: error.to_string(),
})?;
let mut reply = ReadGuestMetricsReply::default();
if let IdmResponseType::Metrics(metrics) = response {
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
reply.root = metrics.root.map(idm_metric_to_api);
}
Ok(Response::new(reply))
@ -362,36 +431,39 @@ impl ControlService for DaemonControlService {
message: err.to_string(),
})?;
let format = match request.format() {
GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Unknown => OciPackedFormat::Squashfs,
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
OciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Tar => OciPackedFormat::Tar,
};
let (sender, mut receiver) = channel::<OciProgress>(100);
let context = OciProgressContext::new(sender);
let (context, mut receiver) = OciProgressContext::create();
let our_packer = self.packer.clone();
let output = try_stream! {
let mut task = tokio::task::spawn(async move {
our_packer.request(name, format, context).await
our_packer.request(name, format, request.overwrite_cache, context).await
});
let abort_handle = task.abort_handle();
let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
handle.abort();
});
loop {
let mut progresses = Vec::new();
let what = select! {
x = receiver.recv_many(&mut progresses, 10) => PullImageSelect::Progress(x),
x = receiver.changed() => match x {
Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
Err(_) => PullImageSelect::Progress(None),
},
x = &mut task => PullImageSelect::Completed(x),
};
match what {
PullImageSelect::Progress(count) => {
if count > 0 {
let progress = progresses.remove(progresses.len() - 1);
let reply = PullImageReply {
progress: Some(convert_oci_progress(progress)),
digest: String::new(),
format: GuestOciImageFormat::Unknown.into(),
};
yield reply;
}
PullImageSelect::Progress(Some(progress)) => {
let reply = PullImageReply {
progress: Some(convert_oci_progress(progress)),
digest: String::new(),
format: OciImageFormat::Unknown.into(),
};
yield reply;
},
PullImageSelect::Completed(result) => {
@ -405,13 +477,18 @@ impl ControlService for DaemonControlService {
progress: None,
digest: packed.digest,
format: match packed.format {
OciPackedFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
OciPackedFormat::Erofs => GuestOciImageFormat::Erofs.into(),
OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
OciPackedFormat::Tar => OciImageFormat::Tar.into(),
},
};
yield reply;
break;
},
_ => {
continue;
}
}
}
};
@ -438,11 +515,37 @@ impl ControlService for DaemonControlService {
) -> Result<Response<Self::SnoopIdmStream>, Status> {
let _ = request.into_inner();
let mut messages = self.idm.snoop();
let glt = self.glt.clone();
let output = try_stream! {
while let Ok(event) = messages.recv().await {
yield SnoopIdmReply { from: event.from, to: event.to, packet: Some(event.packet) };
let Some(from_uuid) = glt.lookup_uuid_by_domid(event.from).await else {
continue;
};
let Some(to_uuid) = glt.lookup_uuid_by_domid(event.to).await else {
continue;
};
yield SnoopIdmReply { from: from_uuid.to_string(), to: to_uuid.to_string(), packet: Some(event.packet) };
}
};
Ok(Response::new(Box::pin(output) as Self::SnoopIdmStream))
}
async fn list_devices(
&self,
request: Request<ListDevicesRequest>,
) -> Result<Response<ListDevicesReply>, Status> {
let _ = request.into_inner();
let mut devices = Vec::new();
let state = self.devices.copy().await.map_err(|error| ApiError {
message: error.to_string(),
})?;
for (name, state) in state {
devices.push(DeviceInfo {
name,
claimed: state.owner.is_some(),
owner: state.owner.map(|x| x.to_string()).unwrap_or_default(),
});
}
Ok(Response::new(ListDevicesReply { devices }))
}
}


@ -0,0 +1,106 @@
use std::{collections::HashMap, sync::Arc};
use anyhow::{anyhow, Result};
use log::warn;
use tokio::sync::RwLock;
use uuid::Uuid;
use crate::config::{DaemonConfig, DaemonPciDeviceConfig};
#[derive(Clone)]
pub struct DaemonDeviceState {
pub pci: Option<DaemonPciDeviceConfig>,
pub owner: Option<Uuid>,
}
#[derive(Clone)]
pub struct DaemonDeviceManager {
config: Arc<DaemonConfig>,
devices: Arc<RwLock<HashMap<String, DaemonDeviceState>>>,
}
impl DaemonDeviceManager {
pub fn new(config: Arc<DaemonConfig>) -> Self {
Self {
config,
devices: Arc::new(RwLock::new(HashMap::new())),
}
}
pub async fn claim(&self, device: &str, uuid: Uuid) -> Result<DaemonDeviceState> {
let mut devices = self.devices.write().await;
let Some(state) = devices.get_mut(device) else {
return Err(anyhow!(
"unable to claim unknown device '{}' for guest {}",
device,
uuid
));
};
if let Some(owner) = state.owner {
return Err(anyhow!(
"unable to claim device '{}' for guest {}: already claimed by {}",
device,
uuid,
owner
));
}
state.owner = Some(uuid);
Ok(state.clone())
}
pub async fn release_all(&self, uuid: Uuid) -> Result<()> {
let mut devices = self.devices.write().await;
for state in (*devices).values_mut() {
if state.owner == Some(uuid) {
state.owner = None;
}
}
Ok(())
}
pub async fn release(&self, device: &str, uuid: Uuid) -> Result<()> {
let mut devices = self.devices.write().await;
let Some(state) = devices.get_mut(device) else {
return Ok(());
};
if let Some(owner) = state.owner {
if owner != uuid {
return Ok(());
}
}
state.owner = None;
Ok(())
}
pub async fn update_claims(&self, claims: HashMap<String, Uuid>) -> Result<()> {
let mut devices = self.devices.write().await;
devices.clear();
for (name, pci) in &self.config.pci.devices {
let owner = claims.get(name).cloned();
devices.insert(
name.clone(),
DaemonDeviceState {
owner,
pci: Some(pci.clone()),
},
);
}
for (name, uuid) in &claims {
if !devices.contains_key(name) {
warn!("unknown device '{}' assigned to guest {}", name, uuid);
}
}
Ok(())
}
pub async fn copy(&self) -> Result<HashMap<String, DaemonDeviceState>> {
let devices = self.devices.read().await;
Ok(devices.clone())
}
}
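
A rough usage sketch of the claim semantics above, not part of the diff. The device name and guest UUIDs are invented, and it assumes the manager was already populated from config via update_claims with a device named "gpu0":

// Illustrative claim/release flow against the manager defined above.
use kratad::devices::DaemonDeviceManager;
use uuid::Uuid;

async fn device_claim_example(manager: &DaemonDeviceManager) -> anyhow::Result<()> {
    let guest_a = Uuid::new_v4();
    let guest_b = Uuid::new_v4();
    // First claim succeeds and records the owner.
    let state = manager.claim("gpu0", guest_a).await?;
    assert_eq!(state.owner, Some(guest_a));
    // A second guest cannot claim an already-owned device.
    assert!(manager.claim("gpu0", guest_b).await.is_err());
    // Releasing everything held by guest_a frees the device again.
    manager.release_all(guest_a).await?;
    assert!(manager.claim("gpu0", guest_b).await.is_ok());
    Ok(())
}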


@ -6,7 +6,7 @@ use std::{
use anyhow::Result;
use krata::{
idm::protocol::{idm_event::Event, IdmEvent},
idm::{internal::event::Event as EventType, internal::Event},
v1::common::{GuestExitInfo, GuestState, GuestStatus},
};
use log::{error, warn};
@ -50,8 +50,8 @@ pub struct DaemonEventGenerator {
feed: broadcast::Receiver<DaemonEvent>,
idm: DaemonIdmHandle,
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
idm_sender: Sender<(u32, IdmEvent)>,
idm_receiver: Receiver<(u32, IdmEvent)>,
idm_sender: Sender<(u32, Event)>,
idm_receiver: Receiver<(u32, Event)>,
_event_sender: broadcast::Sender<DaemonEvent>,
}
@ -93,7 +93,7 @@ impl DaemonEventGenerator {
match status {
GuestStatus::Started => {
if let Entry::Vacant(e) = self.idms.entry(domid) {
let client = self.idm.client(domid).await?;
let client = self.idm.client_by_domid(domid).await?;
let mut receiver = client.subscribe().await?;
let sender = self.idm_sender.clone();
let task = tokio::task::spawn(async move {
@ -122,9 +122,9 @@ impl DaemonEventGenerator {
Ok(())
}
async fn handle_idm_event(&mut self, id: Uuid, event: IdmEvent) -> Result<()> {
async fn handle_idm_event(&mut self, id: Uuid, event: Event) -> Result<()> {
match event.event {
Some(Event::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
Some(EventType::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
None => Ok(()),
}
}
@ -136,6 +136,7 @@ impl DaemonEventGenerator {
network: guest.state.clone().unwrap_or_default().network,
exit_info: Some(GuestExitInfo { code }),
error_info: None,
host: guest.state.clone().map(|x| x.host).unwrap_or_default(),
domid: guest.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
});

crates/daemon/src/glt.rs

@ -0,0 +1,69 @@
use std::{collections::HashMap, sync::Arc};
use tokio::sync::RwLock;
use uuid::Uuid;
struct GuestLookupTableState {
domid_to_uuid: HashMap<u32, Uuid>,
uuid_to_domid: HashMap<Uuid, u32>,
}
impl GuestLookupTableState {
pub fn new(host_uuid: Uuid) -> Self {
let mut domid_to_uuid = HashMap::new();
let mut uuid_to_domid = HashMap::new();
domid_to_uuid.insert(0, host_uuid);
uuid_to_domid.insert(host_uuid, 0);
GuestLookupTableState {
domid_to_uuid,
uuid_to_domid,
}
}
}
#[derive(Clone)]
pub struct GuestLookupTable {
host_domid: u32,
host_uuid: Uuid,
state: Arc<RwLock<GuestLookupTableState>>,
}
impl GuestLookupTable {
pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
GuestLookupTable {
host_domid,
host_uuid,
state: Arc::new(RwLock::new(GuestLookupTableState::new(host_uuid))),
}
}
pub fn host_uuid(&self) -> Uuid {
self.host_uuid
}
pub fn host_domid(&self) -> u32 {
self.host_domid
}
pub async fn lookup_uuid_by_domid(&self, domid: u32) -> Option<Uuid> {
let state = self.state.read().await;
state.domid_to_uuid.get(&domid).cloned()
}
pub async fn lookup_domid_by_uuid(&self, uuid: &Uuid) -> Option<u32> {
let state = self.state.read().await;
state.uuid_to_domid.get(uuid).cloned()
}
pub async fn associate(&self, uuid: Uuid, domid: u32) {
let mut state = self.state.write().await;
state.uuid_to_domid.insert(uuid, domid);
state.domid_to_uuid.insert(domid, uuid);
}
pub async fn remove(&self, uuid: Uuid, domid: u32) {
let mut state = self.state.write().await;
state.uuid_to_domid.remove(&uuid);
state.domid_to_uuid.remove(&domid);
}
}
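
A minimal sketch of the bidirectional lookup this table provides, not part of the diff. The guest UUID and domid are invented; the module path assumes the kratad lib.rs exports shown later in this section:

// Sketch of the UUID <-> domid mapping maintained by GuestLookupTable.
use kratad::glt::GuestLookupTable;
use uuid::Uuid;

async fn glt_example() {
    let host = Uuid::new_v4();
    // Domid 0 is reserved for the host, mirroring Daemon::new later in this diff.
    let glt = GuestLookupTable::new(0, host);
    let guest = Uuid::new_v4();
    glt.associate(guest, 7).await;
    assert_eq!(glt.lookup_domid_by_uuid(&guest).await, Some(7));
    assert_eq!(glt.lookup_uuid_by_domid(7).await, Some(guest));
    glt.remove(guest, 7).await;
    assert_eq!(glt.lookup_domid_by_uuid(&guest).await, None);
    // The host mapping inserted at construction time remains.
    assert_eq!(glt.lookup_uuid_by_domid(0).await, Some(host));
}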


@ -6,8 +6,9 @@ use std::{
use anyhow::{anyhow, Result};
use bytes::{Buf, BytesMut};
use krata::idm::{
client::{IdmBackend, IdmClient},
protocol::IdmPacket,
client::{IdmBackend, IdmInternalClient},
internal::INTERNAL_IDM_CHANNEL,
transport::IdmTransportPacket,
};
use kratart::channel::ChannelService;
use log::{error, warn};
@ -21,15 +22,19 @@ use tokio::{
},
task::JoinHandle,
};
use uuid::Uuid;
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmPacket>>>>;
type ClientMap = Arc<Mutex<HashMap<u32, IdmClient>>>;
use crate::glt::GuestLookupTable;
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
#[derive(Clone)]
pub struct DaemonIdmHandle {
glt: GuestLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmPacket)>,
tx_sender: Sender<(u32, IdmTransportPacket)>,
task: Arc<JoinHandle<()>>,
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
}
@ -39,7 +44,14 @@ impl DaemonIdmHandle {
self.snoop_sender.subscribe()
}
pub async fn client(&self, domid: u32) -> Result<IdmClient> {
pub async fn client(&self, uuid: Uuid) -> Result<IdmInternalClient> {
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
self.client_by_domid(domid).await
}
pub async fn client_by_domid(&self, domid: u32) -> Result<IdmInternalClient> {
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await
}
}
@ -56,22 +68,23 @@ impl Drop for DaemonIdmHandle {
pub struct DaemonIdmSnoopPacket {
pub from: u32,
pub to: u32,
pub packet: IdmPacket,
pub packet: IdmTransportPacket,
}
pub struct DaemonIdm {
glt: GuestLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmPacket)>,
tx_sender: Sender<(u32, IdmTransportPacket)>,
tx_raw_sender: Sender<(u32, Vec<u8>)>,
tx_receiver: Receiver<(u32, IdmPacket)>,
tx_receiver: Receiver<(u32, IdmTransportPacket)>,
rx_receiver: Receiver<(u32, Option<Vec<u8>>)>,
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
task: JoinHandle<()>,
}
impl DaemonIdm {
pub async fn new() -> Result<DaemonIdm> {
pub async fn new(glt: GuestLookupTable) -> Result<DaemonIdm> {
let (service, tx_raw_sender, rx_receiver) =
ChannelService::new("krata-channel".to_string(), None).await?;
let (tx_sender, tx_receiver) = channel(100);
@ -80,6 +93,7 @@ impl DaemonIdm {
let clients = Arc::new(Mutex::new(HashMap::new()));
let feeds = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonIdm {
glt,
rx_receiver,
tx_receiver,
tx_sender,
@ -92,6 +106,7 @@ impl DaemonIdm {
}
pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
let glt = self.glt.clone();
let clients = self.clients.clone();
let feeds = self.feeds.clone();
let tx_sender = self.tx_sender.clone();
@ -104,6 +119,7 @@ impl DaemonIdm {
}
});
Ok(DaemonIdmHandle {
glt,
clients,
feeds,
tx_sender,
@ -136,7 +152,7 @@ impl DaemonIdm {
}
let mut packet = buffer.split_to(needed);
packet.advance(6);
match IdmPacket::decode(packet) {
match IdmTransportPacket::decode(packet) {
Ok(packet) => {
let _ = client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await?;
let guard = self.feeds.lock().await;
@ -196,10 +212,10 @@ impl Drop for DaemonIdm {
async fn client_or_create(
domid: u32,
tx_sender: &Sender<(u32, IdmPacket)>,
tx_sender: &Sender<(u32, IdmTransportPacket)>,
clients: &ClientMap,
feeds: &BackendFeedMap,
) -> Result<IdmClient> {
) -> Result<IdmInternalClient> {
let mut clients = clients.lock().await;
let mut feeds = feeds.lock().await;
match clients.entry(domid) {
@ -212,7 +228,11 @@ async fn client_or_create(
rx_receiver,
tx_sender: tx_sender.clone(),
};
let client = IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await?;
let client = IdmInternalClient::new(
INTERNAL_IDM_CHANNEL,
Box::new(backend) as Box<dyn IdmBackend>,
)
.await?;
entry.insert(client.clone());
Ok(client)
}
@ -221,13 +241,13 @@ async fn client_or_create(
pub struct IdmDaemonBackend {
domid: u32,
rx_receiver: Receiver<IdmPacket>,
tx_sender: Sender<(u32, IdmPacket)>,
rx_receiver: Receiver<IdmTransportPacket>,
tx_sender: Sender<(u32, IdmTransportPacket)>,
}
#[async_trait::async_trait]
impl IdmBackend for IdmDaemonBackend {
async fn recv(&mut self) -> Result<IdmPacket> {
async fn recv(&mut self) -> Result<IdmTransportPacket> {
if let Some(packet) = self.rx_receiver.recv().await {
Ok(packet)
} else {
@ -235,7 +255,7 @@ impl IdmBackend for IdmDaemonBackend {
}
}
async fn send(&mut self, packet: IdmPacket) -> Result<()> {
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
self.tx_sender.send((self.domid, packet)).await?;
Ok(())
}


@ -1,10 +1,13 @@
use std::{net::SocketAddr, path::PathBuf, str::FromStr};
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
use anyhow::Result;
use anyhow::{anyhow, Result};
use config::DaemonConfig;
use console::{DaemonConsole, DaemonConsoleHandle};
use control::DaemonControlService;
use db::GuestStore;
use devices::DaemonDeviceManager;
use event::{DaemonEventContext, DaemonEventGenerator};
use glt::GuestLookupTable;
use idm::{DaemonIdm, DaemonIdmHandle};
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
@ -21,10 +24,14 @@ use tokio_stream::wrappers::UnixListenerStream;
use tonic::transport::{Identity, Server, ServerTlsConfig};
use uuid::Uuid;
pub mod command;
pub mod config;
pub mod console;
pub mod control;
pub mod db;
pub mod devices;
pub mod event;
pub mod glt;
pub mod idm;
pub mod metrics;
pub mod oci;
@ -32,6 +39,9 @@ pub mod reconcile;
pub struct Daemon {
store: String,
_config: Arc<DaemonConfig>,
glt: GuestLookupTable,
devices: DaemonDeviceManager,
guests: GuestStore,
events: DaemonEventContext,
guest_reconciler_task: JoinHandle<()>,
@ -46,32 +56,68 @@ const GUEST_RECONCILER_QUEUE_LEN: usize = 1000;
impl Daemon {
pub async fn new(store: String) -> Result<Self> {
let mut image_cache_dir = PathBuf::from(store.clone());
let store_dir = PathBuf::from(store.clone());
let mut config_path = store_dir.clone();
config_path.push("config.toml");
let config = DaemonConfig::load(&config_path).await?;
let config = Arc::new(config);
let devices = DaemonDeviceManager::new(config.clone());
let mut image_cache_dir = store_dir.clone();
image_cache_dir.push("cache");
image_cache_dir.push("image");
fs::create_dir_all(&image_cache_dir).await?;
let packer = OciPackerService::new(None, &image_cache_dir, OciPlatform::current())?;
let mut host_uuid_path = store_dir.clone();
host_uuid_path.push("host.uuid");
let host_uuid = if host_uuid_path.is_file() {
let content = fs::read_to_string(&host_uuid_path).await?;
Uuid::from_str(content.trim()).ok()
} else {
None
};
let runtime = Runtime::new(store.clone()).await?;
let host_uuid = if let Some(host_uuid) = host_uuid {
host_uuid
} else {
let generated = Uuid::new_v4();
let mut string = generated.to_string();
string.push('\n');
fs::write(&host_uuid_path, string).await?;
generated
};
let initrd_path = detect_guest_path(&store, "initrd")?;
let kernel_path = detect_guest_path(&store, "kernel")?;
let addons_path = detect_guest_path(&store, "addons.squashfs")?;
let packer = OciPackerService::new(None, &image_cache_dir, OciPlatform::current()).await?;
let runtime = Runtime::new(host_uuid).await?;
let glt = GuestLookupTable::new(0, host_uuid);
let guests_db_path = format!("{}/guests.db", store);
let guests = GuestStore::open(&PathBuf::from(guests_db_path))?;
let (guest_reconciler_notify, guest_reconciler_receiver) =
channel::<Uuid>(GUEST_RECONCILER_QUEUE_LEN);
let idm = DaemonIdm::new().await?;
let idm = DaemonIdm::new(glt.clone()).await?;
let idm = idm.launch().await?;
let console = DaemonConsole::new().await?;
let console = DaemonConsole::new(glt.clone()).await?;
let console = console.launch().await?;
let (events, generator) =
DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
.await?;
let runtime_for_reconciler = runtime.dupe().await?;
let guest_reconciler = GuestReconciler::new(
devices.clone(),
glt.clone(),
guests.clone(),
events.clone(),
runtime_for_reconciler,
packer.clone(),
guest_reconciler_notify.clone(),
kernel_path,
initrd_path,
addons_path,
)?;
let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
@ -79,6 +125,9 @@ impl Daemon {
Ok(Self {
store,
_config: config,
glt,
devices,
guests,
events,
guest_reconciler_task,
@ -92,6 +141,8 @@ impl Daemon {
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
let control_service = DaemonControlService::new(
self.glt.clone(),
self.devices.clone(),
self.events.clone(),
self.console.clone(),
self.idm.clone(),
@ -154,3 +205,16 @@ impl Drop for Daemon {
self.generator_task.abort();
}
}
fn detect_guest_path(store: &str, name: &str) -> Result<PathBuf> {
let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
if path.is_file() {
return Ok(path);
}
path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
if path.is_file() {
return Ok(path);
}
Err(anyhow!("unable to find required guest file: {}", name))
}


@ -1,18 +1,18 @@
use krata::{
idm::protocol::{IdmMetricFormat, IdmMetricNode},
idm::internal::{MetricFormat, MetricNode},
v1::common::{GuestMetricFormat, GuestMetricNode},
};
fn idm_metric_format_to_api(format: IdmMetricFormat) -> GuestMetricFormat {
fn idm_metric_format_to_api(format: MetricFormat) -> GuestMetricFormat {
match format {
IdmMetricFormat::Unknown => GuestMetricFormat::Unknown,
IdmMetricFormat::Bytes => GuestMetricFormat::Bytes,
IdmMetricFormat::Integer => GuestMetricFormat::Integer,
IdmMetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
MetricFormat::Unknown => GuestMetricFormat::Unknown,
MetricFormat::Bytes => GuestMetricFormat::Bytes,
MetricFormat::Integer => GuestMetricFormat::Integer,
MetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
}
}
pub fn idm_metric_to_api(node: IdmMetricNode) -> GuestMetricNode {
pub fn idm_metric_to_api(node: MetricNode) -> GuestMetricNode {
let format = node.format();
GuestMetricNode {
name: node.name,


@ -1,33 +1,72 @@
use krata::v1::control::{
PullImageProgress, PullImageProgressLayer, PullImageProgressLayerPhase, PullImageProgressPhase,
image_progress_indication::Indication, ImageProgress, ImageProgressIndication,
ImageProgressIndicationBar, ImageProgressIndicationCompleted, ImageProgressIndicationHidden,
ImageProgressIndicationSpinner, ImageProgressLayer, ImageProgressLayerPhase,
ImageProgressPhase,
};
use krataoci::progress::{
OciProgress, OciProgressIndication, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase,
};
use krataoci::progress::{OciProgress, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase};
fn convert_oci_layer_progress(layer: OciProgressLayer) -> PullImageProgressLayer {
PullImageProgressLayer {
id: layer.id,
phase: match layer.phase {
OciProgressLayerPhase::Waiting => PullImageProgressLayerPhase::Waiting,
OciProgressLayerPhase::Downloading => PullImageProgressLayerPhase::Downloading,
OciProgressLayerPhase::Downloaded => PullImageProgressLayerPhase::Downloaded,
OciProgressLayerPhase::Extracting => PullImageProgressLayerPhase::Extracting,
OciProgressLayerPhase::Extracted => PullImageProgressLayerPhase::Extracted,
}
.into(),
value: layer.value,
total: layer.total,
fn convert_oci_progress_indication(indication: OciProgressIndication) -> ImageProgressIndication {
ImageProgressIndication {
indication: Some(match indication {
OciProgressIndication::Hidden => Indication::Hidden(ImageProgressIndicationHidden {}),
OciProgressIndication::ProgressBar {
message,
current,
total,
bytes,
} => Indication::Bar(ImageProgressIndicationBar {
message: message.unwrap_or_default(),
current,
total,
is_bytes: bytes,
}),
OciProgressIndication::Spinner { message } => {
Indication::Spinner(ImageProgressIndicationSpinner {
message: message.unwrap_or_default(),
})
}
OciProgressIndication::Completed {
message,
total,
bytes,
} => Indication::Completed(ImageProgressIndicationCompleted {
message: message.unwrap_or_default(),
total: total.unwrap_or(0),
is_bytes: bytes,
}),
}),
}
}
pub fn convert_oci_progress(oci: OciProgress) -> PullImageProgress {
PullImageProgress {
fn convert_oci_layer_progress(layer: OciProgressLayer) -> ImageProgressLayer {
ImageProgressLayer {
id: layer.id,
phase: match layer.phase {
OciProgressLayerPhase::Waiting => ImageProgressLayerPhase::Waiting,
OciProgressLayerPhase::Downloading => ImageProgressLayerPhase::Downloading,
OciProgressLayerPhase::Downloaded => ImageProgressLayerPhase::Downloaded,
OciProgressLayerPhase::Extracting => ImageProgressLayerPhase::Extracting,
OciProgressLayerPhase::Extracted => ImageProgressLayerPhase::Extracted,
}
.into(),
indication: Some(convert_oci_progress_indication(layer.indication)),
}
}
pub fn convert_oci_progress(oci: OciProgress) -> ImageProgress {
ImageProgress {
phase: match oci.phase {
OciProgressPhase::Resolving => PullImageProgressPhase::Resolving,
OciProgressPhase::Resolved => PullImageProgressPhase::Resolved,
OciProgressPhase::ConfigAcquire => PullImageProgressPhase::ConfigAcquire,
OciProgressPhase::LayerAcquire => PullImageProgressPhase::LayerAcquire,
OciProgressPhase::Packing => PullImageProgressPhase::Packing,
OciProgressPhase::Complete => PullImageProgressPhase::Complete,
OciProgressPhase::Started => ImageProgressPhase::Started,
OciProgressPhase::Resolving => ImageProgressPhase::Resolving,
OciProgressPhase::Resolved => ImageProgressPhase::Resolved,
OciProgressPhase::ConfigDownload => ImageProgressPhase::ConfigDownload,
OciProgressPhase::LayerDownload => ImageProgressPhase::LayerDownload,
OciProgressPhase::Assemble => ImageProgressPhase::Assemble,
OciProgressPhase::Pack => ImageProgressPhase::Pack,
OciProgressPhase::Complete => ImageProgressPhase::Complete,
}
.into(),
layers: oci
@ -35,7 +74,6 @@ pub fn convert_oci_progress(oci: OciProgress) -> PullImageProgress {
.into_values()
.map(convert_oci_layer_progress)
.collect::<Vec<_>>(),
value: oci.value,
total: oci.total,
indication: Some(convert_oci_progress_indication(oci.indication)),
}
}


@ -1,20 +1,17 @@
use std::{
collections::{hash_map::Entry, HashMap},
path::PathBuf,
sync::Arc,
time::Duration,
};
use anyhow::{anyhow, Result};
use krata::launchcfg::LaunchPackedFormat;
use anyhow::Result;
use krata::v1::{
common::{
guest_image_spec::Image, Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState,
GuestOciImageFormat, GuestState, GuestStatus,
},
common::{Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState, GuestState, GuestStatus},
control::GuestChangedEvent,
};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::{launch::GuestLaunchRequest, GuestInfo, Runtime};
use krataoci::packer::service::OciPackerService;
use kratart::{GuestInfo, Runtime};
use log::{error, info, trace, warn};
use tokio::{
select,
@ -29,9 +26,15 @@ use uuid::Uuid;
use crate::{
db::GuestStore,
devices::DaemonDeviceManager,
event::{DaemonEvent, DaemonEventContext},
glt::GuestLookupTable,
};
use self::start::GuestStarter;
mod start;
const PARALLEL_LIMIT: u32 = 5;
#[derive(Debug)]
@ -53,28 +56,44 @@ impl Drop for GuestReconcilerEntry {
#[derive(Clone)]
pub struct GuestReconciler {
devices: DaemonDeviceManager,
glt: GuestLookupTable,
guests: GuestStore,
events: DaemonEventContext,
runtime: Runtime,
packer: OciPackerService,
kernel_path: PathBuf,
initrd_path: PathBuf,
addons_path: PathBuf,
tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
guest_reconciler_notify: Sender<Uuid>,
reconcile_lock: Arc<RwLock<()>>,
}
impl GuestReconciler {
#[allow(clippy::too_many_arguments)]
pub fn new(
devices: DaemonDeviceManager,
glt: GuestLookupTable,
guests: GuestStore,
events: DaemonEventContext,
runtime: Runtime,
packer: OciPackerService,
guest_reconciler_notify: Sender<Uuid>,
kernel_path: PathBuf,
initrd_path: PathBuf,
modules_path: PathBuf,
) -> Result<Self> {
Ok(Self {
devices,
glt,
guests,
events,
runtime,
packer,
kernel_path,
initrd_path,
addons_path: modules_path,
tasks: Arc::new(Mutex::new(HashMap::new())),
guest_reconciler_notify,
reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
@ -123,6 +142,25 @@ impl GuestReconciler {
trace!("reconciling runtime");
let runtime_guests = self.runtime.list().await?;
let stored_guests = self.guests.list().await?;
let non_existent_guests = runtime_guests
.iter()
.filter(|x| !stored_guests.iter().any(|g| *g.0 == x.uuid))
.collect::<Vec<_>>();
for guest in non_existent_guests {
warn!("destroying unknown runtime guest {}", guest.uuid);
if let Err(error) = self.runtime.destroy(guest.uuid).await {
error!(
"failed to destroy unknown runtime guest {}: {}",
guest.uuid, error
);
}
self.guests.remove(guest.uuid).await?;
}
let mut device_claims = HashMap::new();
for (uuid, mut stored_guest) in stored_guests {
let previous_guest = stored_guest.clone();
let runtime_guest = runtime_guests.iter().find(|x| x.uuid == uuid);
@ -136,6 +174,7 @@ impl GuestReconciler {
}
Some(runtime) => {
self.glt.associate(uuid, runtime.domid).await;
let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
if let Some(code) = runtime.state.exit_code {
state.status = GuestStatus::Exited.into();
@ -143,6 +182,17 @@ impl GuestReconciler {
} else {
state.status = GuestStatus::Started.into();
}
for device in &stored_guest
.spec
.as_ref()
.cloned()
.unwrap_or_default()
.devices
{
device_claims.insert(device.name.clone(), uuid);
}
state.network = Some(guestinfo_to_networkstate(runtime));
stored_guest.state = Some(state);
}
@ -155,6 +205,9 @@ impl GuestReconciler {
let _ = self.guest_reconciler_notify.try_send(uuid);
}
}
self.devices.update_claims(device_claims).await?;
Ok(())
}
@ -224,71 +277,16 @@ impl GuestReconciler {
}
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
let Some(ref spec) = guest.spec else {
return Err(anyhow!("guest spec not specified"));
let starter = GuestStarter {
devices: &self.devices,
kernel_path: &self.kernel_path,
initrd_path: &self.initrd_path,
addons_path: &self.addons_path,
packer: &self.packer,
glt: &self.glt,
runtime: &self.runtime,
};
let Some(ref image) = spec.image else {
return Err(anyhow!("image spec not provided"));
};
let oci = match image.image {
Some(Image::Oci(ref oci)) => oci,
None => {
return Err(anyhow!("oci spec not specified"));
}
};
let task = spec.task.as_ref().cloned().unwrap_or_default();
let image = self
.packer
.recall(
&oci.digest,
match oci.format() {
GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
},
)
.await?;
let Some(image) = image else {
return Err(anyhow!(
"image {} in the requested format did not exist",
oci.digest
));
};
let info = self
.runtime
.launch(GuestLaunchRequest {
format: LaunchPackedFormat::Squashfs,
uuid: Some(uuid),
name: if spec.name.is_empty() {
None
} else {
Some(spec.name.clone())
},
image,
vcpus: spec.vcpus,
mem: spec.mem,
env: task
.environment
.iter()
.map(|x| (x.key.clone(), x.value.clone()))
.collect::<HashMap<_, _>>(),
run: empty_vec_optional(task.command.clone()),
debug: false,
})
.await?;
info!("started guest {}", uuid);
guest.state = Some(GuestState {
status: GuestStatus::Started.into(),
network: Some(guestinfo_to_networkstate(&info)),
exit_info: None,
error_info: None,
domid: info.domid,
});
Ok(GuestReconcilerResult::Changed { rerun: false })
starter.start(uuid, guest).await
}
async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
@ -305,14 +303,22 @@ impl GuestReconciler {
trace!("failed to destroy runtime guest {}: {}", uuid, error);
}
let domid = guest.state.as_ref().map(|x| x.domid);
if let Some(domid) = domid {
self.glt.remove(uuid, domid).await;
}
info!("destroyed guest {}", uuid);
guest.state = Some(GuestState {
status: GuestStatus::Destroyed.into(),
network: None,
exit_info: None,
error_info: None,
domid: guest.state.as_ref().map(|x| x.domid).unwrap_or(u32::MAX),
host: self.glt.host_uuid().to_string(),
domid: domid.unwrap_or(u32::MAX),
});
self.devices.release_all(uuid).await?;
Ok(GuestReconcilerResult::Changed { rerun: false })
}
@ -356,15 +362,7 @@ impl GuestReconciler {
}
}
fn empty_vec_optional<T>(value: Vec<T>) -> Option<Vec<T>> {
if value.is_empty() {
None
} else {
Some(value)
}
}
fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
pub fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
GuestNetworkState {
guest_ipv4: info.guest_ipv4.map(|x| x.to_string()).unwrap_or_default(),
guest_ipv6: info.guest_ipv6.map(|x| x.to_string()).unwrap_or_default(),


@ -0,0 +1,224 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use anyhow::{anyhow, Result};
use futures::StreamExt;
use krata::launchcfg::LaunchPackedFormat;
use krata::v1::common::GuestOciImageSpec;
use krata::v1::common::{guest_image_spec::Image, Guest, GuestState, GuestStatus, OciImageFormat};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
use kratart::{launch::GuestLaunchRequest, Runtime};
use log::info;
use tokio::fs::{self, File};
use tokio::io::AsyncReadExt;
use tokio_tar::Archive;
use uuid::Uuid;
use crate::config::DaemonPciDeviceRdmReservePolicy;
use crate::devices::DaemonDeviceManager;
use crate::{
glt::GuestLookupTable,
reconcile::guest::{guestinfo_to_networkstate, GuestReconcilerResult},
};
pub struct GuestStarter<'a> {
pub devices: &'a DaemonDeviceManager,
pub kernel_path: &'a Path,
pub initrd_path: &'a Path,
pub addons_path: &'a Path,
pub packer: &'a OciPackerService,
pub glt: &'a GuestLookupTable,
pub runtime: &'a Runtime,
}
impl GuestStarter<'_> {
pub async fn oci_spec_tar_read_file(
&self,
file: &Path,
oci: &GuestOciImageSpec,
) -> Result<Vec<u8>> {
if oci.format() != OciImageFormat::Tar {
return Err(anyhow!(
"oci image spec for {} is required to be in tar format",
oci.digest
));
}
let image = self
.packer
.recall(&oci.digest, OciPackedFormat::Tar)
.await?;
let Some(image) = image else {
return Err(anyhow!("image {} was not found in tar format", oci.digest));
};
let mut archive = Archive::new(File::open(&image.path).await?);
let mut entries = archive.entries()?;
while let Some(entry) = entries.next().await {
let mut entry = entry?;
let path = entry.path()?;
if path == file {
let mut buffer = Vec::new();
entry.read_to_end(&mut buffer).await?;
return Ok(buffer);
}
}
Err(anyhow!(
"unable to find file {} in image {}",
file.to_string_lossy(),
oci.digest
))
}
pub async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
let Some(ref spec) = guest.spec else {
return Err(anyhow!("guest spec not specified"));
};
let Some(ref image) = spec.image else {
return Err(anyhow!("image spec not provided"));
};
let oci = match image.image {
Some(Image::Oci(ref oci)) => oci,
None => {
return Err(anyhow!("oci spec not specified"));
}
};
let task = spec.task.as_ref().cloned().unwrap_or_default();
let image = self
.packer
.recall(
&oci.digest,
match oci.format() {
OciImageFormat::Unknown => OciPackedFormat::Squashfs,
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
OciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Tar => {
return Err(anyhow!("tar image format is not supported for guests"));
}
},
)
.await?;
let Some(image) = image else {
return Err(anyhow!(
"image {} in the requested format did not exist",
oci.digest
));
};
let kernel = if let Some(ref spec) = spec.kernel {
let Some(Image::Oci(ref oci)) = spec.image else {
return Err(anyhow!("kernel image spec must be an oci image"));
};
self.oci_spec_tar_read_file(&PathBuf::from("kernel/image"), oci)
.await?
} else {
fs::read(&self.kernel_path).await?
};
let initrd = if let Some(ref spec) = spec.initrd {
let Some(Image::Oci(ref oci)) = spec.image else {
return Err(anyhow!("initrd image spec must be an oci image"));
};
self.oci_spec_tar_read_file(&PathBuf::from("krata/initrd"), oci)
.await?
} else {
fs::read(&self.initrd_path).await?
};
let success = AtomicBool::new(false);
let _device_release_guard = scopeguard::guard(
(spec.devices.clone(), self.devices.clone()),
|(devices, manager)| {
if !success.load(Ordering::Acquire) {
tokio::task::spawn(async move {
for device in devices {
let _ = manager.release(&device.name, uuid).await;
}
});
}
},
);
let mut pcis = Vec::new();
for device in &spec.devices {
let state = self.devices.claim(&device.name, uuid).await?;
if let Some(cfg) = state.pci {
for location in cfg.locations {
let pci = PciDevice {
bdf: PciBdf::from_str(&location)?.with_domain(0),
permissive: cfg.permissive,
msi_translate: cfg.msi_translate,
power_management: cfg.power_management,
rdm_reserve_policy: match cfg.rdm_reserve_policy {
DaemonPciDeviceRdmReservePolicy::Strict => PciRdmReservePolicy::Strict,
DaemonPciDeviceRdmReservePolicy::Relaxed => {
PciRdmReservePolicy::Relaxed
}
},
};
pcis.push(pci);
}
} else {
return Err(anyhow!(
"device '{}' isn't a known device type",
device.name
));
}
}
let info = self
.runtime
.launch(GuestLaunchRequest {
format: LaunchPackedFormat::Squashfs,
uuid: Some(uuid),
name: if spec.name.is_empty() {
None
} else {
Some(spec.name.clone())
},
image,
kernel,
initrd,
vcpus: spec.vcpus,
mem: spec.mem,
pcis,
env: task
.environment
.iter()
.map(|x| (x.key.clone(), x.value.clone()))
.collect::<HashMap<_, _>>(),
run: empty_vec_optional(task.command.clone()),
debug: false,
addons_image: Some(self.addons_path.to_path_buf()),
})
.await?;
self.glt.associate(uuid, info.domid).await;
info!("started guest {}", uuid);
guest.state = Some(GuestState {
status: GuestStatus::Started.into(),
network: Some(guestinfo_to_networkstate(&info)),
exit_info: None,
error_info: None,
host: self.glt.host_uuid().to_string(),
domid: info.domid,
});
success.store(true, Ordering::Release);
Ok(GuestReconcilerResult::Changed { rerun: false })
}
}
fn empty_vec_optional<T>(value: Vec<T>) -> Option<Vec<T>> {
if value.is_empty() {
None
} else {
Some(value)
}
}


@ -1,6 +1,6 @@
[package]
name = "krata-guest"
description = "Guest services for the krata hypervisor."
description = "Guest services for the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
@ -14,13 +14,14 @@ cgroups-rs = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.11" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.11" }
libc = { workspace = true }
log = { workspace = true }
nix = { workspace = true, features = ["ioctl", "process", "fs"] }
oci-spec = { workspace = true }
path-absolutize = { workspace = true }
platform-info = { workspace = true }
rtnetlink = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }


@ -1,15 +1,17 @@
use crate::{
childwait::{ChildEvent, ChildWait},
death,
exec::GuestExecTask,
metrics::MetricsCollector,
};
use anyhow::Result;
use cgroups_rs::Cgroup;
use krata::idm::{
client::IdmClient,
protocol::{
idm_event::Event, idm_request::Request, idm_response::Response, IdmEvent, IdmExitEvent,
IdmMetricsResponse, IdmPingResponse, IdmRequest,
client::{IdmClientStreamResponseHandle, IdmInternalClient},
internal::{
event::Event as EventType, request::Request as RequestType,
response::Response as ResponseType, Event, ExecStreamResponseUpdate, ExitEvent,
MetricsResponse, PingResponse, Request, Response,
},
};
use log::debug;
@ -17,14 +19,18 @@ use nix::unistd::Pid;
use tokio::{select, sync::broadcast};
pub struct GuestBackground {
idm: IdmClient,
idm: IdmInternalClient,
child: Pid,
_cgroup: Cgroup,
wait: ChildWait,
}
impl GuestBackground {
pub async fn new(idm: IdmClient, cgroup: Cgroup, child: Pid) -> Result<GuestBackground> {
pub async fn new(
idm: IdmInternalClient,
cgroup: Cgroup,
child: Pid,
) -> Result<GuestBackground> {
Ok(GuestBackground {
idm,
child,
@ -36,11 +42,11 @@ impl GuestBackground {
pub async fn run(&mut self) -> Result<()> {
let mut event_subscription = self.idm.subscribe().await?;
let mut requests_subscription = self.idm.requests().await?;
let mut request_streams_subscription = self.idm.request_streams().await?;
loop {
select! {
x = event_subscription.recv() => match x {
Ok(_event) => {
},
Err(broadcast::error::RecvError::Closed) => {
@ -54,8 +60,23 @@ impl GuestBackground {
},
x = requests_subscription.recv() => match x {
Ok(request) => {
self.handle_idm_request(request).await?;
Ok((id, request)) => {
self.handle_idm_request(id, request).await?;
},
Err(broadcast::error::RecvError::Closed) => {
debug!("idm packet channel closed");
break;
},
_ => {
continue;
}
},
x = request_streams_subscription.recv() => match x {
Ok(handle) => {
self.handle_idm_stream_request(handle).await?;
},
Err(broadcast::error::RecvError::Closed) => {
@ -79,25 +100,56 @@ impl GuestBackground {
Ok(())
}
async fn handle_idm_request(&mut self, packet: IdmRequest) -> Result<()> {
let id = packet.id;
async fn handle_idm_request(&mut self, id: u64, packet: Request) -> Result<()> {
match packet.request {
Some(Request::Ping(_)) => {
Some(RequestType::Ping(_)) => {
self.idm
.respond(id, Response::Ping(IdmPingResponse {}))
.respond(
id,
Response {
response: Some(ResponseType::Ping(PingResponse {})),
},
)
.await?;
}
Some(Request::Metrics(_)) => {
Some(RequestType::Metrics(_)) => {
let metrics = MetricsCollector::new()?;
let root = metrics.collect()?;
let response = IdmMetricsResponse { root: Some(root) };
let response = Response {
response: Some(ResponseType::Metrics(MetricsResponse { root: Some(root) })),
};
self.idm.respond(id, Response::Metrics(response)).await?;
self.idm.respond(id, response).await?;
}
None => {}
_ => {}
}
Ok(())
}
async fn handle_idm_stream_request(
&mut self,
handle: IdmClientStreamResponseHandle<Request>,
) -> Result<()> {
if let Some(RequestType::ExecStream(_)) = &handle.initial.request {
tokio::task::spawn(async move {
let exec = GuestExecTask { handle };
if let Err(error) = exec.run().await {
let _ = exec
.handle
.respond(Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: true,
error: error.to_string(),
exit_code: -1,
stdout: vec![],
stderr: vec![],
})),
})
.await;
}
});
}
Ok(())
}
@ -105,8 +157,8 @@ impl GuestBackground {
async fn child_event(&mut self, event: ChildEvent) -> Result<()> {
if event.pid == self.child {
self.idm
.emit(IdmEvent {
event: Some(Event::Exit(IdmExitEvent { code: event.status })),
.emit(Event {
event: Some(EventType::Exit(ExitEvent { code: event.status })),
})
.await?;
death(event.status).await?;

crates/guest/src/exec.rs (new file, 172 lines)
View File

@ -0,0 +1,172 @@
use std::{collections::HashMap, process::Stdio};
use anyhow::{anyhow, Result};
use krata::idm::{
client::IdmClientStreamResponseHandle,
internal::{
exec_stream_request_update::Update, request::Request as RequestType,
ExecStreamResponseUpdate,
},
internal::{response::Response as ResponseType, Request, Response},
};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
join,
process::Command,
};
pub struct GuestExecTask {
pub handle: IdmClientStreamResponseHandle<Request>,
}
impl GuestExecTask {
pub async fn run(&self) -> Result<()> {
let mut receiver = self.handle.take().await?;
let Some(ref request) = self.handle.initial.request else {
return Err(anyhow!("request was empty"));
};
let RequestType::ExecStream(update) = request else {
return Err(anyhow!("request was not an exec update"));
};
let Some(Update::Start(ref start)) = update.update else {
return Err(anyhow!("first request did not contain a start update"));
};
let mut cmd = start.command.clone();
if cmd.is_empty() {
return Err(anyhow!("command line was empty"));
}
let exe = cmd.remove(0);
let mut env = HashMap::new();
for entry in &start.environment {
env.insert(entry.key.clone(), entry.value.clone());
}
if !env.contains_key("PATH") {
env.insert(
"PATH".to_string(),
"/bin:/usr/bin:/usr/local/bin".to_string(),
);
}
let dir = if start.working_directory.is_empty() {
"/".to_string()
} else {
start.working_directory.clone()
};
let mut child = Command::new(exe)
.args(cmd)
.envs(env)
.current_dir(dir)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.kill_on_drop(true)
.spawn()
.map_err(|error| anyhow!("failed to spawn: {}", error))?;
let mut stdin = child
.stdin
.take()
.ok_or_else(|| anyhow!("stdin was missing"))?;
let mut stdout = child
.stdout
.take()
.ok_or_else(|| anyhow!("stdout was missing"))?;
let mut stderr = child
.stderr
.take()
.ok_or_else(|| anyhow!("stderr was missing"))?;
let stdout_handle = self.handle.clone();
let stdout_task = tokio::task::spawn(async move {
let mut stdout_buffer = vec![0u8; 8 * 1024];
loop {
let Ok(size) = stdout.read(&mut stdout_buffer).await else {
break;
};
if size > 0 {
let response = Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: false,
exit_code: 0,
error: String::new(),
stdout: stdout_buffer[0..size].to_vec(),
stderr: vec![],
})),
};
let _ = stdout_handle.respond(response).await;
} else {
break;
}
}
});
let stderr_handle = self.handle.clone();
let stderr_task = tokio::task::spawn(async move {
let mut stderr_buffer = vec![0u8; 8 * 1024];
loop {
let Ok(size) = stderr.read(&mut stderr_buffer).await else {
break;
};
if size > 0 {
let response = Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: false,
exit_code: 0,
error: String::new(),
stdout: vec![],
stderr: stderr_buffer[0..size].to_vec(),
})),
};
let _ = stderr_handle.respond(response).await;
} else {
break;
}
}
});
let stdin_task = tokio::task::spawn(async move {
loop {
let Some(request) = receiver.recv().await else {
break;
};
let Some(RequestType::ExecStream(update)) = request.request else {
continue;
};
let Some(Update::Stdin(update)) = update.update else {
continue;
};
if stdin.write_all(&update.data).await.is_err() {
break;
}
}
});
let exit = child.wait().await?;
let code = exit.code().unwrap_or(-1);
let _ = join!(stdout_task, stderr_task);
stdin_task.abort();
let response = Response {
response: Some(ResponseType::ExecStream(ExecStreamResponseUpdate {
exited: true,
exit_code: code,
error: String::new(),
stdout: vec![],
stderr: vec![],
})),
};
self.handle.respond(response).await?;
Ok(())
}
}
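For orientation, a hedged sketch of the sending side of this exec stream, using the send_stream and IdmClientStreamRequestHandle APIs introduced later in this change; the client handle, command line, and environment shown here are illustrative only:

    use anyhow::Result;
    use krata::idm::{
        client::IdmInternalClient,
        internal::{
            exec_stream_request_update::Update, request::Request as RequestType,
            response::Response as ResponseType, ExecEnvVar, ExecStreamRequestStart,
            ExecStreamRequestUpdate, Request,
        },
    };

    // Hypothetical controller-side counterpart: start a command in the guest over
    // the internal IDM channel and wait for the exit update.
    async fn run_remote_command(client: &IdmInternalClient) -> Result<i32> {
        let start = Request {
            request: Some(RequestType::ExecStream(ExecStreamRequestUpdate {
                update: Some(Update::Start(ExecStreamRequestStart {
                    environment: vec![ExecEnvVar {
                        key: "PATH".to_string(),
                        value: "/bin:/usr/bin:/usr/local/bin".to_string(),
                    }],
                    command: vec!["uname".to_string(), "-a".to_string()],
                    working_directory: "/".to_string(),
                })),
            })),
        };
        // Further updates on the same handle (Update::Stdin) would carry stdin data.
        let mut handle = client.send_stream(start).await?;
        while let Some(response) = handle.receiver.recv().await {
            if let Some(ResponseType::ExecStream(update)) = response.response {
                if update.exited {
                    return Ok(update.exit_code);
                }
            }
        }
        Ok(-1)
    }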

View File

@ -3,7 +3,8 @@ use cgroups_rs::{Cgroup, CgroupPid};
use futures::stream::TryStreamExt;
use ipnetwork::IpNetwork;
use krata::ethtool::EthtoolHandle;
use krata::idm::client::IdmClient;
use krata::idm::client::IdmInternalClient;
use krata::idm::internal::INTERNAL_IDM_CHANNEL;
use krata::launchcfg::{LaunchInfo, LaunchNetwork, LaunchPackedFormat};
use libc::{sethostname, setsid, TIOCSCTTY};
use log::{trace, warn};
@ -11,6 +12,7 @@ use nix::ioctl_write_int_bad;
use nix::unistd::{dup2, execve, fork, ForkResult, Pid};
use oci_spec::image::{Config, ImageConfiguration};
use path_absolutize::Absolutize;
use platform_info::{PlatformInfo, PlatformInfoAPI, UNameAPI};
use std::collections::HashMap;
use std::ffi::CString;
use std::fs::{File, OpenOptions, Permissions};
@ -49,6 +51,10 @@ const NEW_ROOT_DEV_PATH: &str = "/newroot/dev";
const IMAGE_CONFIG_JSON_PATH: &str = "/config/image/config.json";
const LAUNCH_CONFIG_JSON_PATH: &str = "/config/launch.json";
const ADDONS_DEVICE_PATH: &str = "/dev/xvdc";
const ADDONS_MOUNT_PATH: &str = "/addons";
const ADDONS_MODULES_PATH: &str = "/addons/modules";
ioctl_write_int_bad!(set_controlling_terminal, TIOCSCTTY);
pub struct GuestInit {}
@ -77,7 +83,7 @@ impl GuestInit {
Err(error) => warn!("failed to open console: {}", error),
};
let idm = IdmClient::open("/dev/hvc1")
let idm = IdmInternalClient::open(INTERNAL_IDM_CHANNEL, "/dev/hvc1")
.await
.map_err(|x| anyhow!("failed to open idm client: {}", x))?;
self.mount_config_image().await?;
@ -87,7 +93,10 @@ impl GuestInit {
self.mount_root_image(launch.root.format.clone()).await?;
self.mount_addons().await?;
self.mount_new_root().await?;
self.mount_kernel_modules().await?;
self.bind_new_root().await?;
if let Some(hostname) = launch.hostname.clone() {
@ -136,16 +145,60 @@ impl GuestInit {
self.create_dir("/root", Some(0o0700)).await?;
self.create_dir("/tmp", None).await?;
self.create_dir("/run", Some(0o0755)).await?;
self.mount_kernel_fs("devtmpfs", "/dev", "mode=0755", None)
self.mount_kernel_fs("devtmpfs", "/dev", "mode=0755", None, None)
.await?;
self.mount_kernel_fs("proc", "/proc", "", None, None)
.await?;
self.mount_kernel_fs("sysfs", "/sys", "", None, None)
.await?;
self.create_dir("/dev/pts", Some(0o0755)).await?;
self.mount_kernel_fs("devpts", "/dev/pts", "", None, Some("/dev/ptmx"))
.await?;
self.mount_kernel_fs("proc", "/proc", "", None).await?;
self.mount_kernel_fs("sysfs", "/sys", "", None).await?;
fs::symlink("/proc/self/fd", "/dev/fd").await?;
fs::symlink("/proc/self/fd/0", "/dev/stdin").await?;
fs::symlink("/proc/self/fd/1", "/dev/stdout").await?;
fs::symlink("/proc/self/fd/2", "/dev/stderr").await?;
self.mount_kernel_fs("cgroup2", "/sys/fs/cgroup", "", Some(MountFlags::RELATIME))
.await?;
self.mount_kernel_fs(
"cgroup2",
"/sys/fs/cgroup",
"",
Some(MountFlags::RELATIME),
None,
)
.await?;
Ok(())
}
async fn mount_addons(&mut self) -> Result<()> {
if !fs::try_exists(ADDONS_DEVICE_PATH).await? {
return Ok(());
}
self.mount_image(
&PathBuf::from(ADDONS_DEVICE_PATH),
&PathBuf::from(ADDONS_MOUNT_PATH),
LaunchPackedFormat::Squashfs,
)
.await?;
Ok(())
}
async fn mount_kernel_modules(&mut self) -> Result<()> {
if !fs::try_exists(ADDONS_MODULES_PATH).await? {
return Ok(());
}
let Some(platform_info) = PlatformInfo::new().ok() else {
return Ok(());
};
let kernel_release = platform_info.release().to_string_lossy().to_string();
let modules_path = format!("/newroot/lib/modules/{}", kernel_release);
fs::create_dir_all(&modules_path).await?;
Mount::builder()
.fstype(FilesystemType::Manual("none"))
.flags(MountFlags::BIND | MountFlags::RDONLY)
.mount(ADDONS_MODULES_PATH, modules_path)?;
Ok(())
}
@ -169,13 +222,14 @@ impl GuestInit {
path: &str,
data: &str,
flags: Option<MountFlags>,
source: Option<&str>,
) -> Result<()> {
trace!("mounting kernel fs {} to {}", fstype, path);
Mount::builder()
.fstype(FilesystemType::Manual(fstype))
.flags(MountFlags::NOEXEC | MountFlags::NOSUID | flags.unwrap_or(MountFlags::empty()))
.data(data)
.mount(fstype, path)?;
.mount(source.unwrap_or(fstype), path)?;
Ok(())
}
@ -438,7 +492,12 @@ impl GuestInit {
Ok(())
}
async fn run(&mut self, config: &Config, launch: &LaunchInfo, idm: IdmClient) -> Result<()> {
async fn run(
&mut self,
config: &Config,
launch: &LaunchInfo,
idm: IdmInternalClient,
) -> Result<()> {
let mut cmd = match config.cmd() {
None => vec![],
Some(value) => value.clone(),
@ -473,7 +532,7 @@ impl GuestInit {
env.insert("TERM".to_string(), "xterm".to_string());
}
let path = GuestInit::resolve_executable(&env, path.into())?;
let path = resolve_executable(&env, path.into())?;
let Some(file_name) = path.file_name() else {
return Err(anyhow!("cannot get file name of command path"));
};
@ -531,27 +590,6 @@ impl GuestInit {
map
}
fn resolve_executable(env: &HashMap<String, String>, path: PathBuf) -> Result<PathBuf> {
if path.is_absolute() {
return Ok(path);
}
if path.is_file() {
return Ok(path.absolutize()?.to_path_buf());
}
if let Some(path_var) = env.get("PATH") {
for item in path_var.split(':') {
let mut exe_path: PathBuf = item.into();
exe_path.push(&path);
if exe_path.is_file() {
return Ok(exe_path);
}
}
}
Ok(path)
}
fn env_list(env: HashMap<String, String>) -> Vec<String> {
env.iter()
.map(|(key, value)| format!("{}={}", key, value))
@ -560,7 +598,7 @@ impl GuestInit {
async fn fork_and_exec(
&mut self,
idm: IdmClient,
idm: IdmInternalClient,
cgroup: Cgroup,
working_dir: String,
path: CString,
@ -596,9 +634,35 @@ impl GuestInit {
Ok(())
}
async fn background(&mut self, idm: IdmClient, cgroup: Cgroup, executed: Pid) -> Result<()> {
async fn background(
&mut self,
idm: IdmInternalClient,
cgroup: Cgroup,
executed: Pid,
) -> Result<()> {
let mut background = GuestBackground::new(idm, cgroup, executed).await?;
background.run().await?;
Ok(())
}
}
pub fn resolve_executable(env: &HashMap<String, String>, path: PathBuf) -> Result<PathBuf> {
if path.is_absolute() {
return Ok(path);
}
if path.is_file() {
return Ok(path.absolutize()?.to_path_buf());
}
if let Some(path_var) = env.get("PATH") {
for item in path_var.split(':') {
let mut exe_path: PathBuf = item.into();
exe_path.push(&path);
if exe_path.is_file() {
return Ok(exe_path);
}
}
}
Ok(path)
}

View File

@ -6,6 +6,7 @@ use xenstore::{XsdClient, XsdInterface};
pub mod background;
pub mod childwait;
pub mod exec;
pub mod init;
pub mod metrics;

View File

@ -1,7 +1,7 @@
use std::{ops::Add, path::Path};
use anyhow::Result;
use krata::idm::protocol::{IdmMetricFormat, IdmMetricNode};
use krata::idm::internal::{MetricFormat, MetricNode};
use sysinfo::Process;
pub struct MetricsCollector {}
@ -11,9 +11,9 @@ impl MetricsCollector {
Ok(MetricsCollector {})
}
pub fn collect(&self) -> Result<IdmMetricNode> {
pub fn collect(&self) -> Result<MetricNode> {
let mut sysinfo = sysinfo::System::new();
Ok(IdmMetricNode::structural(
Ok(MetricNode::structural(
"guest",
vec![
self.collect_system(&mut sysinfo)?,
@ -22,22 +22,22 @@ impl MetricsCollector {
))
}
fn collect_system(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
fn collect_system(&self, sysinfo: &mut sysinfo::System) -> Result<MetricNode> {
sysinfo.refresh_memory();
Ok(IdmMetricNode::structural(
Ok(MetricNode::structural(
"system",
vec![IdmMetricNode::structural(
vec![MetricNode::structural(
"memory",
vec![
IdmMetricNode::value("total", sysinfo.total_memory(), IdmMetricFormat::Bytes),
IdmMetricNode::value("used", sysinfo.used_memory(), IdmMetricFormat::Bytes),
IdmMetricNode::value("free", sysinfo.free_memory(), IdmMetricFormat::Bytes),
MetricNode::value("total", sysinfo.total_memory(), MetricFormat::Bytes),
MetricNode::value("used", sysinfo.used_memory(), MetricFormat::Bytes),
MetricNode::value("free", sysinfo.free_memory(), MetricFormat::Bytes),
],
)],
))
}
fn collect_processes(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
fn collect_processes(&self, sysinfo: &mut sysinfo::System) -> Result<MetricNode> {
sysinfo.refresh_processes();
let mut processes = Vec::new();
let mut sysinfo_processes = sysinfo.processes().values().collect::<Vec<_>>();
@ -48,71 +48,68 @@ impl MetricsCollector {
}
processes.push(MetricsCollector::process_node(process)?);
}
Ok(IdmMetricNode::structural("process", processes))
Ok(MetricNode::structural("process", processes))
}
fn process_node(process: &Process) -> Result<IdmMetricNode> {
fn process_node(process: &Process) -> Result<MetricNode> {
let mut metrics = vec![];
if let Some(parent) = process.parent() {
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"parent",
parent.as_u32() as u64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
}
if let Some(exe) = process.exe().and_then(path_as_str) {
metrics.push(IdmMetricNode::raw_value("executable", exe));
metrics.push(MetricNode::raw_value("executable", exe));
}
if let Some(working_directory) = process.cwd().and_then(path_as_str) {
metrics.push(IdmMetricNode::raw_value("cwd", working_directory));
metrics.push(MetricNode::raw_value("cwd", working_directory));
}
let cmdline = process.cmd().to_vec();
metrics.push(IdmMetricNode::raw_value("cmdline", cmdline));
metrics.push(IdmMetricNode::structural(
metrics.push(MetricNode::raw_value("cmdline", cmdline));
metrics.push(MetricNode::structural(
"memory",
vec![
IdmMetricNode::value("resident", process.memory(), IdmMetricFormat::Bytes),
IdmMetricNode::value("virtual", process.virtual_memory(), IdmMetricFormat::Bytes),
MetricNode::value("resident", process.memory(), MetricFormat::Bytes),
MetricNode::value("virtual", process.virtual_memory(), MetricFormat::Bytes),
],
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"lifetime",
process.run_time(),
IdmMetricFormat::DurationSeconds,
MetricFormat::DurationSeconds,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"uid",
process.user_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"gid",
process.group_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"euid",
process
.effective_user_id()
.map(|x| (*x).add(0))
.unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
metrics.push(IdmMetricNode::value(
metrics.push(MetricNode::value(
"egid",
process.effective_group_id().map(|x| x.add(0)).unwrap_or(0) as f64,
IdmMetricFormat::Integer,
MetricFormat::Integer,
));
Ok(IdmMetricNode::structural(
process.pid().to_string(),
metrics,
))
Ok(MetricNode::structural(process.pid().to_string(), metrics))
}
}
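The MetricNode tree produced here is recursive: a node either carries a value or children. A small hedged sketch of consuming it; the formatting is illustrative and not the CLI's actual output:

    use krata::idm::internal::MetricNode;

    // Walk a MetricNode tree and print each node with indentation.
    fn print_metrics(node: &MetricNode, depth: usize) {
        let indent = "  ".repeat(depth);
        match &node.value {
            Some(value) => println!("{}{} = {:?}", indent, node.name, value),
            None => println!("{}{}", indent, node.name),
        }
        for child in &node.children {
            print_metrics(child, depth + 1);
        }
    }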

View File

@ -1,6 +1,6 @@
[package]
name = "krata"
description = "Client library and common services for the krata hypervisor."
description = "Client library and common services for the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true

View File

@ -6,12 +6,20 @@ fn main() -> Result<()> {
.descriptor_pool("crate::DESCRIPTOR_POOL")
.configure(
&mut config,
&["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
&[
"proto/krata/v1/control.proto",
"proto/krata/idm/transport.proto",
"proto/krata/idm/internal.proto",
],
&["proto/"],
)?;
tonic_build::configure().compile_with_config(
config,
&["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
&[
"proto/krata/v1/control.proto",
"proto/krata/idm/transport.proto",
"proto/krata/idm/internal.proto",
],
&["proto/"],
)?;
Ok(())

View File

@ -1,67 +0,0 @@
syntax = "proto3";
package krata.bus.idm;
option java_multiple_files = true;
option java_package = "dev.krata.proto.bus.idm";
option java_outer_classname = "IdmProto";
import "google/protobuf/struct.proto";
message IdmPacket {
oneof content {
IdmEvent event = 1;
IdmRequest request = 2;
IdmResponse response = 3;
}
}
message IdmEvent {
oneof event {
IdmExitEvent exit = 1;
}
}
message IdmExitEvent {
int32 code = 1;
}
message IdmRequest {
uint64 id = 1;
oneof request {
IdmPingRequest ping = 2;
IdmMetricsRequest metrics = 3;
}
}
message IdmPingRequest {}
message IdmMetricsRequest {}
message IdmResponse {
uint64 id = 1;
oneof response {
IdmPingResponse ping = 2;
IdmMetricsResponse metrics = 3;
}
}
message IdmPingResponse {}
message IdmMetricsResponse {
IdmMetricNode root = 1;
}
message IdmMetricNode {
string name = 1;
google.protobuf.Value value = 2;
IdmMetricFormat format = 3;
repeated IdmMetricNode children = 4;
}
enum IdmMetricFormat {
IDM_METRIC_FORMAT_UNKNOWN = 0;
IDM_METRIC_FORMAT_BYTES = 1;
IDM_METRIC_FORMAT_INTEGER = 2;
IDM_METRIC_FORMAT_DURATION_SECONDS = 3;
}

View File

@ -0,0 +1,89 @@
syntax = "proto3";
package krata.idm.internal;
option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.internal";
option java_outer_classname = "IdmInternalProto";
import "google/protobuf/struct.proto";
message ExitEvent {
int32 code = 1;
}
message PingRequest {}
message PingResponse {}
message MetricsRequest {}
message MetricsResponse {
MetricNode root = 1;
}
message MetricNode {
string name = 1;
google.protobuf.Value value = 2;
MetricFormat format = 3;
repeated MetricNode children = 4;
}
enum MetricFormat {
METRIC_FORMAT_UNKNOWN = 0;
METRIC_FORMAT_BYTES = 1;
METRIC_FORMAT_INTEGER = 2;
METRIC_FORMAT_DURATION_SECONDS = 3;
}
message ExecEnvVar {
string key = 1;
string value = 2;
}
message ExecStreamRequestStart {
repeated ExecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
}
message ExecStreamRequestStdin {
bytes data = 1;
}
message ExecStreamRequestUpdate {
oneof update {
ExecStreamRequestStart start = 1;
ExecStreamRequestStdin stdin = 2;
}
}
message ExecStreamResponseUpdate {
bool exited = 1;
string error = 2;
int32 exit_code = 3;
bytes stdout = 4;
bytes stderr = 5;
}
message Event {
oneof event {
ExitEvent exit = 1;
}
}
message Request {
oneof request {
PingRequest ping = 1;
MetricsRequest metrics = 2;
ExecStreamRequestUpdate exec_stream = 3;
}
}
message Response {
oneof response {
PingResponse ping = 1;
MetricsResponse metrics = 2;
ExecStreamResponseUpdate exec_stream = 3;
}
}
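A hedged sketch of how these messages travel over the request/response path of the generic IDM client reworked elsewhere in this change; the connected client handle is assumed:

    use anyhow::{anyhow, Result};
    use krata::idm::{
        client::IdmInternalClient,
        internal::{
            request::Request as RequestType, response::Response as ResponseType,
            PingRequest, Request,
        },
    };

    // Issue a ping on the internal channel and match the oneof reply.
    async fn ping(client: &IdmInternalClient) -> Result<()> {
        let response = client
            .send(Request {
                request: Some(RequestType::Ping(PingRequest {})),
            })
            .await?;
        match response.response {
            Some(ResponseType::Ping(_)) => Ok(()),
            _ => Err(anyhow!("unexpected response to ping")),
        }
    }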

View File

@ -0,0 +1,27 @@
syntax = "proto3";
package krata.idm.transport;
option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.transport";
option java_outer_classname = "IdmTransportProto";
message IdmTransportPacket {
uint64 id = 1;
uint64 channel = 2;
IdmTransportPacketForm form = 3;
bytes data = 4;
}
enum IdmTransportPacketForm {
IDM_TRANSPORT_PACKET_FORM_UNKNOWN = 0;
IDM_TRANSPORT_PACKET_FORM_RAW = 1;
IDM_TRANSPORT_PACKET_FORM_EVENT = 2;
IDM_TRANSPORT_PACKET_FORM_REQUEST = 3;
IDM_TRANSPORT_PACKET_FORM_RESPONSE = 4;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST = 5;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_UPDATE = 6;
IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_UPDATE = 7;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_CLOSED = 8;
IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_CLOSED = 9;
}
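For reference, a minimal sketch of the byte framing the file backend in this change uses to carry IdmTransportPacket over the channel device: two 0xff marker bytes, a little-endian u32 length, then the protobuf body. The helper name is illustrative:

    use krata::idm::transport::IdmTransportPacket;
    use prost::Message;

    // Frame a packet the way IdmFileBackend writes it to the channel device.
    fn frame_packet(packet: &IdmTransportPacket) -> Vec<u8> {
        let body = packet.encode_to_vec();
        let mut framed = Vec::with_capacity(body.len() + 6);
        framed.extend_from_slice(&[0xff, 0xff]); // stream marker
        framed.extend_from_slice(&(body.len() as u32).to_le_bytes()); // payload length
        framed.extend_from_slice(&body); // protobuf-encoded packet
        framed
    }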

View File

@ -17,10 +17,15 @@ message Guest {
message GuestSpec {
string name = 1;
GuestImageSpec image = 2;
uint32 vcpus = 3;
uint64 mem = 4;
GuestTaskSpec task = 5;
repeated GuestSpecAnnotation annotations = 6;
// If not specified, defaults to the daemon default kernel.
GuestImageSpec kernel = 3;
// If not specified, defaults to the daemon default initrd.
GuestImageSpec initrd = 4;
uint32 vcpus = 5;
uint64 mem = 6;
GuestTaskSpec task = 7;
repeated GuestSpecAnnotation annotations = 8;
repeated GuestSpecDevice devices = 9;
}
message GuestImageSpec {
@ -29,20 +34,23 @@ message GuestImageSpec {
}
}
enum GuestOciImageFormat {
GUEST_OCI_IMAGE_FORMAT_UNKNOWN = 0;
GUEST_OCI_IMAGE_FORMAT_SQUASHFS = 1;
GUEST_OCI_IMAGE_FORMAT_EROFS = 2;
enum OciImageFormat {
OCI_IMAGE_FORMAT_UNKNOWN = 0;
OCI_IMAGE_FORMAT_SQUASHFS = 1;
OCI_IMAGE_FORMAT_EROFS = 2;
// Tar format is not launchable, and is intended for kernel images.
OCI_IMAGE_FORMAT_TAR = 3;
}
message GuestOciImageSpec {
string digest = 1;
GuestOciImageFormat format = 2;
OciImageFormat format = 2;
}
message GuestTaskSpec {
repeated GuestTaskSpecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
}
message GuestTaskSpecEnvVar {
@ -55,12 +63,17 @@ message GuestSpecAnnotation {
string value = 2;
}
message GuestSpecDevice {
string name = 1;
}
message GuestState {
GuestStatus status = 1;
GuestNetworkState network = 2;
GuestExitInfo exit_info = 3;
GuestErrorInfo error_info = 4;
uint32 domid = 5;
string host = 5;
uint32 domid = 6;
}
enum GuestStatus {

View File

@ -6,14 +6,19 @@ option java_multiple_files = true;
option java_package = "dev.krata.proto.v1.control";
option java_outer_classname = "ControlProto";
import "krata/bus/idm.proto";
import "krata/idm/transport.proto";
import "krata/v1/common.proto";
service ControlService {
rpc IdentifyHost(IdentifyHostRequest) returns (IdentifyHostReply);
rpc CreateGuest(CreateGuestRequest) returns (CreateGuestReply);
rpc DestroyGuest(DestroyGuestRequest) returns (DestroyGuestReply);
rpc ResolveGuest(ResolveGuestRequest) returns (ResolveGuestReply);
rpc ListGuests(ListGuestsRequest) returns (ListGuestsReply);
rpc ListDevices(ListDevicesRequest) returns (ListDevicesReply);
rpc ExecGuest(stream ExecGuestRequest) returns (stream ExecGuestReply);
rpc ConsoleData(stream ConsoleDataRequest) returns (stream ConsoleDataReply);
rpc ReadGuestMetrics(ReadGuestMetricsRequest) returns (ReadGuestMetricsReply);
@ -24,6 +29,14 @@ service ControlService {
rpc PullImage(PullImageRequest) returns (stream PullImageReply);
}
message IdentifyHostRequest {}
message IdentifyHostReply {
string host_uuid = 1;
uint32 host_domid = 2;
string krata_version = 3;
}
message CreateGuestRequest {
krata.v1.common.GuestSpec spec = 1;
}
@ -52,6 +65,20 @@ message ListGuestsReply {
repeated krata.v1.common.Guest guests = 1;
}
message ExecGuestRequest {
string guest_id = 1;
krata.v1.common.GuestTaskSpec task = 2;
bytes data = 3;
}
message ExecGuestReply {
bool exited = 1;
string error = 2;
int32 exit_code = 3;
bytes stdout = 4;
bytes stderr = 5;
}
message ConsoleDataRequest {
string guest_id = 1;
bytes data = 2;
@ -84,51 +111,92 @@ message ReadGuestMetricsReply {
message SnoopIdmRequest {}
message SnoopIdmReply {
uint32 from = 1;
uint32 to = 2;
krata.bus.idm.IdmPacket packet = 3;
string from = 1;
string to = 2;
krata.idm.transport.IdmTransportPacket packet = 3;
}
enum PullImageProgressLayerPhase {
PULL_IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
PULL_IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
message ImageProgress {
ImageProgressPhase phase = 1;
repeated ImageProgressLayer layers = 2;
ImageProgressIndication indication = 3;
}
message PullImageProgressLayer {
enum ImageProgressPhase {
IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
IMAGE_PROGRESS_PHASE_STARTED = 1;
IMAGE_PROGRESS_PHASE_RESOLVING = 2;
IMAGE_PROGRESS_PHASE_RESOLVED = 3;
IMAGE_PROGRESS_PHASE_CONFIG_DOWNLOAD = 4;
IMAGE_PROGRESS_PHASE_LAYER_DOWNLOAD = 5;
IMAGE_PROGRESS_PHASE_ASSEMBLE = 6;
IMAGE_PROGRESS_PHASE_PACK = 7;
IMAGE_PROGRESS_PHASE_COMPLETE = 8;
}
message ImageProgressLayer {
string id = 1;
PullImageProgressLayerPhase phase = 2;
uint64 value = 3;
uint64 total = 4;
ImageProgressLayerPhase phase = 2;
ImageProgressIndication indication = 3;
}
enum PullImageProgressPhase {
PULL_IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
PULL_IMAGE_PROGRESS_PHASE_RESOLVING = 1;
PULL_IMAGE_PROGRESS_PHASE_RESOLVED = 2;
PULL_IMAGE_PROGRESS_PHASE_CONFIG_ACQUIRE = 3;
PULL_IMAGE_PROGRESS_PHASE_LAYER_ACQUIRE = 4;
PULL_IMAGE_PROGRESS_PHASE_PACKING = 5;
PULL_IMAGE_PROGRESS_PHASE_COMPLETE = 6;
enum ImageProgressLayerPhase {
IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
}
message PullImageProgress {
PullImageProgressPhase phase = 1;
repeated PullImageProgressLayer layers = 2;
uint64 value = 3;
uint64 total = 4;
message ImageProgressIndication {
oneof indication {
ImageProgressIndicationBar bar = 1;
ImageProgressIndicationSpinner spinner = 2;
ImageProgressIndicationHidden hidden = 3;
ImageProgressIndicationCompleted completed = 4;
}
}
message ImageProgressIndicationBar {
string message = 1;
uint64 current = 2;
uint64 total = 3;
bool is_bytes = 4;
}
message ImageProgressIndicationSpinner {
string message = 1;
}
message ImageProgressIndicationHidden {}
message ImageProgressIndicationCompleted {
string message = 1;
uint64 total = 2;
bool is_bytes = 3;
}
message PullImageRequest {
string image = 1;
krata.v1.common.GuestOciImageFormat format = 2;
krata.v1.common.OciImageFormat format = 2;
bool overwrite_cache = 3;
}
message PullImageReply {
PullImageProgress progress = 1;
ImageProgress progress = 1;
string digest = 2;
krata.v1.common.GuestOciImageFormat format = 3;
krata.v1.common.OciImageFormat format = 3;
}
message DeviceInfo {
string name = 1;
bool claimed = 2;
string owner = 3;
}
message ListDevicesRequest {}
message ListDevicesReply {
repeated DeviceInfo devices = 1;
}
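A hedged sketch of driving the new ExecGuest bidirectional stream from a tonic client; the module paths, generated names, and field defaults are assumptions based on the proto above, not code from this change:

    use anyhow::Result;
    use krata::v1::{
        common::{GuestTaskSpec, GuestTaskSpecEnvVar},
        control::{control_service_client::ControlServiceClient, ExecGuestRequest},
    };
    use tonic::transport::Channel;

    // Run a command in a guest and wait for its exit code.
    async fn exec_in_guest(
        client: &mut ControlServiceClient<Channel>,
        guest_id: &str,
    ) -> Result<i32> {
        let initial = ExecGuestRequest {
            guest_id: guest_id.to_string(),
            task: Some(GuestTaskSpec {
                environment: vec![GuestTaskSpecEnvVar {
                    key: "TERM".to_string(),
                    value: "xterm".to_string(),
                }],
                command: vec!["uname".to_string(), "-a".to_string()],
                working_directory: "/".to_string(),
            }),
            data: Default::default(),
        };
        // The first message starts the task; later messages on the stream carry stdin.
        let mut replies = client
            .exec_guest(tokio_stream::iter(vec![initial]))
            .await?
            .into_inner();
        while let Some(reply) = replies.message().await? {
            if reply.exited {
                return Ok(reply.exit_code);
            }
        }
        Ok(-1)
    }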

View File

@ -1 +0,0 @@
pub mod idm;

View File

@ -8,10 +8,6 @@ use std::{
time::Duration,
};
use super::protocol::{
idm_packet::Content, idm_request::Request, idm_response::Response, IdmEvent, IdmPacket,
IdmRequest, IdmResponse,
};
use anyhow::{anyhow, Result};
use log::{debug, error};
use nix::sys::termios::{cfmakeraw, tcgetattr, tcsetattr, SetArg};
@ -22,14 +18,23 @@ use tokio::{
select,
sync::{
broadcast,
mpsc::{channel, Receiver, Sender},
mpsc::{self, Receiver, Sender},
oneshot, Mutex,
},
task::JoinHandle,
time::timeout,
};
type RequestMap = Arc<Mutex<HashMap<u64, oneshot::Sender<IdmResponse>>>>;
use super::{
internal,
serialize::{IdmRequest, IdmSerializable},
transport::{IdmTransportPacket, IdmTransportPacketForm},
};
type OneshotRequestMap<R> = Arc<Mutex<HashMap<u64, oneshot::Sender<<R as IdmRequest>::Response>>>>;
type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, mpsc::Sender<R>>>>;
pub type IdmInternalClient = IdmClient<internal::Request, internal::Event>;
const IDM_PACKET_QUEUE_LEN: usize = 100;
const IDM_REQUEST_TIMEOUT_SECS: u64 = 10;
@ -37,8 +42,8 @@ const IDM_PACKET_MAX_SIZE: usize = 20 * 1024 * 1024;
#[async_trait::async_trait]
pub trait IdmBackend: Send {
async fn recv(&mut self) -> Result<IdmPacket>;
async fn send(&mut self, packet: IdmPacket) -> Result<()>;
async fn recv(&mut self) -> Result<IdmTransportPacket>;
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()>;
}
pub struct IdmFileBackend {
@ -66,30 +71,30 @@ impl IdmFileBackend {
#[async_trait::async_trait]
impl IdmBackend for IdmFileBackend {
async fn recv(&mut self) -> Result<IdmPacket> {
async fn recv(&mut self) -> Result<IdmTransportPacket> {
let mut fd = self.read_fd.lock().await;
let mut guard = fd.readable_mut().await?;
let b1 = guard.get_inner_mut().read_u8().await?;
if b1 != 0xff {
return Ok(IdmPacket::default());
return Ok(IdmTransportPacket::default());
}
let b2 = guard.get_inner_mut().read_u8().await?;
if b2 != 0xff {
return Ok(IdmPacket::default());
return Ok(IdmTransportPacket::default());
}
let size = guard.get_inner_mut().read_u32_le().await?;
if size == 0 {
return Ok(IdmPacket::default());
return Ok(IdmTransportPacket::default());
}
let mut buffer = vec![0u8; size as usize];
guard.get_inner_mut().read_exact(&mut buffer).await?;
match IdmPacket::decode(buffer.as_slice()) {
match IdmTransportPacket::decode(buffer.as_slice()) {
Ok(packet) => Ok(packet),
Err(error) => Err(anyhow!("received invalid idm packet: {}", error)),
}
}
async fn send(&mut self, packet: IdmPacket) -> Result<()> {
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
let mut file = self.write.lock().await;
let data = packet.encode_to_vec();
file.write_all(&[0xff, 0xff]).await?;
@ -100,16 +105,19 @@ impl IdmBackend for IdmFileBackend {
}
#[derive(Clone)]
pub struct IdmClient {
request_backend_sender: broadcast::Sender<IdmRequest>,
pub struct IdmClient<R: IdmRequest, E: IdmSerializable> {
channel: u64,
request_backend_sender: broadcast::Sender<(u64, R)>,
request_stream_backend_sender: broadcast::Sender<IdmClientStreamResponseHandle<R>>,
next_request_id: Arc<Mutex<u64>>,
event_receiver_sender: broadcast::Sender<IdmEvent>,
tx_sender: Sender<IdmPacket>,
requests: RequestMap,
event_receiver_sender: broadcast::Sender<E>,
tx_sender: Sender<IdmTransportPacket>,
requests: OneshotRequestMap<R>,
request_streams: StreamRequestMap<R>,
task: Arc<JoinHandle<()>>,
}
impl Drop for IdmClient {
impl<R: IdmRequest, E: IdmSerializable> Drop for IdmClient<R, E> {
fn drop(&mut self) {
if Arc::strong_count(&self.task) <= 1 {
self.task.abort();
@ -117,21 +125,122 @@ impl Drop for IdmClient {
}
}
impl IdmClient {
pub async fn new(backend: Box<dyn IdmBackend>) -> Result<IdmClient> {
pub struct IdmClientStreamRequestHandle<R: IdmRequest, E: IdmSerializable> {
pub id: u64,
pub receiver: Receiver<R::Response>,
pub client: IdmClient<R, E>,
}
impl<R: IdmRequest, E: IdmSerializable> IdmClientStreamRequestHandle<R, E> {
pub async fn update(&self, request: R) -> Result<()> {
self.client
.tx_sender
.send(IdmTransportPacket {
id: self.id,
channel: self.client.channel,
form: IdmTransportPacketForm::StreamRequestUpdate.into(),
data: request.encode()?,
})
.await?;
Ok(())
}
}
impl<R: IdmRequest, E: IdmSerializable> Drop for IdmClientStreamRequestHandle<R, E> {
fn drop(&mut self) {
let id = self.id;
let client = self.client.clone();
tokio::task::spawn(async move {
let _ = client
.tx_sender
.send(IdmTransportPacket {
id,
channel: client.channel,
form: IdmTransportPacketForm::StreamRequestClosed.into(),
data: vec![],
})
.await;
});
}
}
#[derive(Clone)]
pub struct IdmClientStreamResponseHandle<R: IdmRequest> {
pub initial: R,
pub id: u64,
channel: u64,
tx_sender: Sender<IdmTransportPacket>,
receiver: Arc<Mutex<Option<Receiver<R>>>>,
}
impl<R: IdmRequest> IdmClientStreamResponseHandle<R> {
pub async fn respond(&self, response: R::Response) -> Result<()> {
self.tx_sender
.send(IdmTransportPacket {
id: self.id,
channel: self.channel,
form: IdmTransportPacketForm::StreamResponseUpdate.into(),
data: response.encode()?,
})
.await?;
Ok(())
}
pub async fn take(&self) -> Result<Receiver<R>> {
let mut guard = self.receiver.lock().await;
let Some(receiver) = (*guard).take() else {
return Err(anyhow!("request has already been claimed!"));
};
Ok(receiver)
}
}
impl<R: IdmRequest> Drop for IdmClientStreamResponseHandle<R> {
fn drop(&mut self) {
if Arc::strong_count(&self.receiver) <= 1 {
let id = self.id;
let channel = self.channel;
let tx_sender = self.tx_sender.clone();
tokio::task::spawn(async move {
let _ = tx_sender
.send(IdmTransportPacket {
id,
channel,
form: IdmTransportPacketForm::StreamResponseClosed.into(),
data: vec![],
})
.await;
});
}
}
}
impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
pub async fn new(channel: u64, backend: Box<dyn IdmBackend>) -> Result<Self> {
let requests = Arc::new(Mutex::new(HashMap::new()));
let request_streams = Arc::new(Mutex::new(HashMap::new()));
let request_update_streams = Arc::new(Mutex::new(HashMap::new()));
let (event_sender, event_receiver) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
let (internal_request_backend_sender, _) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
let (tx_sender, tx_receiver) = channel(IDM_PACKET_QUEUE_LEN);
let (internal_request_stream_backend_sender, _) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
let (tx_sender, tx_receiver) = mpsc::channel(IDM_PACKET_QUEUE_LEN);
let backend_event_sender = event_sender.clone();
let request_backend_sender = internal_request_backend_sender.clone();
let request_stream_backend_sender = internal_request_stream_backend_sender.clone();
let requests_for_client = requests.clone();
let request_streams_for_client = request_streams.clone();
let tx_sender_for_client = tx_sender.clone();
let task = tokio::task::spawn(async move {
if let Err(error) = IdmClient::process(
backend,
channel,
tx_sender,
backend_event_sender,
requests,
request_streams,
request_update_streams,
internal_request_backend_sender,
internal_request_stream_backend_sender,
event_receiver,
tx_receiver,
)
@ -141,16 +250,19 @@ impl IdmClient {
}
});
Ok(IdmClient {
channel,
next_request_id: Arc::new(Mutex::new(0)),
event_receiver_sender: event_sender.clone(),
request_backend_sender,
request_stream_backend_sender,
requests: requests_for_client,
tx_sender,
request_streams: request_streams_for_client,
tx_sender: tx_sender_for_client,
task: Arc::new(task),
})
}
pub async fn open<P: AsRef<Path>>(path: P) -> Result<IdmClient> {
pub async fn open<P: AsRef<Path>>(channel: u64, path: P) -> Result<Self> {
let read_file = File::options()
.read(true)
.write(false)
@ -164,39 +276,54 @@ impl IdmClient {
.open(path)
.await?;
let backend = IdmFileBackend::new(read_file, write_file).await?;
IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await
IdmClient::new(channel, Box::new(backend) as Box<dyn IdmBackend>).await
}
pub async fn emit(&self, event: IdmEvent) -> Result<()> {
pub async fn emit<T: IdmSerializable>(&self, event: T) -> Result<()> {
let id = {
let mut guard = self.next_request_id.lock().await;
let req = *guard;
*guard = req.wrapping_add(1);
req
};
self.tx_sender
.send(IdmPacket {
content: Some(Content::Event(event)),
.send(IdmTransportPacket {
id,
form: IdmTransportPacketForm::Event.into(),
channel: self.channel,
data: event.encode()?,
})
.await?;
Ok(())
}
pub async fn requests(&self) -> Result<broadcast::Receiver<IdmRequest>> {
pub async fn requests(&self) -> Result<broadcast::Receiver<(u64, R)>> {
Ok(self.request_backend_sender.subscribe())
}
pub async fn respond(&self, id: u64, response: Response) -> Result<()> {
let packet = IdmPacket {
content: Some(Content::Response(IdmResponse {
id,
response: Some(response),
})),
pub async fn request_streams(
&self,
) -> Result<broadcast::Receiver<IdmClientStreamResponseHandle<R>>> {
Ok(self.request_stream_backend_sender.subscribe())
}
pub async fn respond<T: IdmSerializable>(&self, id: u64, response: T) -> Result<()> {
let packet = IdmTransportPacket {
id,
form: IdmTransportPacketForm::Response.into(),
channel: self.channel,
data: response.encode()?,
};
self.tx_sender.send(packet).await?;
Ok(())
}
pub async fn subscribe(&self) -> Result<broadcast::Receiver<IdmEvent>> {
pub async fn subscribe(&self) -> Result<broadcast::Receiver<E>> {
Ok(self.event_receiver_sender.subscribe())
}
pub async fn send(&self, request: Request) -> Result<Response> {
let (sender, receiver) = oneshot::channel::<IdmResponse>();
pub async fn send(&self, request: R) -> Result<R::Response> {
let (sender, receiver) = oneshot::channel::<R::Response>();
let req = {
let mut guard = self.next_request_id.lock().await;
let req = *guard;
@ -217,52 +344,135 @@ impl IdmClient {
});
});
self.tx_sender
.send(IdmPacket {
content: Some(Content::Request(IdmRequest {
id: req,
request: Some(request),
})),
.send(IdmTransportPacket {
id: req,
channel: self.channel,
form: IdmTransportPacketForm::Request.into(),
data: request.encode()?,
})
.await?;
let response = timeout(Duration::from_secs(IDM_REQUEST_TIMEOUT_SECS), receiver).await??;
success.store(true, Ordering::Release);
if let Some(response) = response.response {
Ok(response)
} else {
Err(anyhow!("response did not contain any content"))
}
Ok(response)
}
pub async fn send_stream(&self, request: R) -> Result<IdmClientStreamRequestHandle<R, E>> {
let (sender, receiver) = mpsc::channel::<R::Response>(100);
let req = {
let mut guard = self.next_request_id.lock().await;
let req = *guard;
*guard = req.wrapping_add(1);
req
};
let mut requests = self.request_streams.lock().await;
requests.insert(req, sender);
drop(requests);
self.tx_sender
.send(IdmTransportPacket {
id: req,
channel: self.channel,
form: IdmTransportPacketForm::StreamRequest.into(),
data: request.encode()?,
})
.await?;
Ok(IdmClientStreamRequestHandle {
id: req,
receiver,
client: self.clone(),
})
}
#[allow(clippy::too_many_arguments)]
async fn process(
mut backend: Box<dyn IdmBackend>,
event_sender: broadcast::Sender<IdmEvent>,
requests: RequestMap,
request_backend_sender: broadcast::Sender<IdmRequest>,
_event_receiver: broadcast::Receiver<IdmEvent>,
mut receiver: Receiver<IdmPacket>,
channel: u64,
tx_sender: Sender<IdmTransportPacket>,
event_sender: broadcast::Sender<E>,
requests: OneshotRequestMap<R>,
request_streams: StreamRequestMap<R>,
request_update_streams: StreamRequestUpdateMap<R>,
request_backend_sender: broadcast::Sender<(u64, R)>,
request_stream_backend_sender: broadcast::Sender<IdmClientStreamResponseHandle<R>>,
_event_receiver: broadcast::Receiver<E>,
mut receiver: Receiver<IdmTransportPacket>,
) -> Result<()> {
loop {
select! {
x = backend.recv() => match x {
Ok(packet) => {
match packet.content {
Some(Content::Event(event)) => {
let _ = event_sender.send(event);
},
if packet.channel != channel {
continue;
}
Some(Content::Request(request)) => {
let _ = request_backend_sender.send(request);
},
Some(Content::Response(response)) => {
let mut requests = requests.lock().await;
if let Some(sender) = requests.remove(&response.id) {
drop(requests);
let _ = sender.send(response);
match packet.form() {
IdmTransportPacketForm::Event => {
if let Ok(event) = E::decode(&packet.data) {
let _ = event_sender.send(event);
}
},
IdmTransportPacketForm::Request => {
if let Ok(request) = R::decode(&packet.data) {
let _ = request_backend_sender.send((packet.id, request));
}
},
IdmTransportPacketForm::Response => {
let mut requests = requests.lock().await;
if let Some(sender) = requests.remove(&packet.id) {
drop(requests);
if let Ok(response) = R::Response::decode(&packet.data) {
let _ = sender.send(response);
}
}
},
IdmTransportPacketForm::StreamRequest => {
if let Ok(request) = R::decode(&packet.data) {
let mut update_streams = request_update_streams.lock().await;
let (sender, receiver) = mpsc::channel(100);
update_streams.insert(packet.id, sender.clone());
let handle = IdmClientStreamResponseHandle {
initial: request,
id: packet.id,
channel,
tx_sender: tx_sender.clone(),
receiver: Arc::new(Mutex::new(Some(receiver))),
};
let _ = request_stream_backend_sender.send(handle);
}
}
IdmTransportPacketForm::StreamRequestUpdate => {
if let Ok(request) = R::decode(&packet.data) {
let mut update_streams = request_update_streams.lock().await;
if let Some(stream) = update_streams.get_mut(&packet.id) {
let _ = stream.try_send(request);
}
}
}
IdmTransportPacketForm::StreamRequestClosed => {
let mut update_streams = request_update_streams.lock().await;
update_streams.remove(&packet.id);
}
IdmTransportPacketForm::StreamResponseUpdate => {
let requests = request_streams.lock().await;
if let Some(sender) = requests.get(&packet.id) {
if let Ok(response) = R::Response::decode(&packet.data) {
let _ = sender.try_send(response);
}
}
}
IdmTransportPacketForm::StreamResponseClosed => {
let mut requests = request_streams.lock().await;
requests.remove(&packet.id);
}
_ => {},
}
},
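The IdmBackend trait the reworked client builds on stays small, so transports other than the channel device are easy to plug in. A hedged sketch of an in-memory backend (useful for tests; not part of this diff):

    use anyhow::{anyhow, Result};
    use krata::idm::{client::IdmBackend, transport::IdmTransportPacket};
    use tokio::sync::mpsc::{Receiver, Sender};

    // Shuttle packets over tokio channels instead of a character device.
    pub struct LoopbackIdmBackend {
        rx: Receiver<IdmTransportPacket>,
        tx: Sender<IdmTransportPacket>,
    }

    #[async_trait::async_trait]
    impl IdmBackend for LoopbackIdmBackend {
        async fn recv(&mut self) -> Result<IdmTransportPacket> {
            self.rx
                .recv()
                .await
                .ok_or_else(|| anyhow!("loopback channel closed"))
        }

        async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
            self.tx
                .send(packet)
                .await
                .map_err(|_| anyhow!("loopback channel closed"))
        }
    }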

View File

@ -1,26 +1,66 @@
use anyhow::Result;
use prost::Message;
use prost_types::{ListValue, Value};
include!(concat!(env!("OUT_DIR"), "/krata.bus.idm.rs"));
use super::serialize::{IdmRequest, IdmSerializable};
include!(concat!(env!("OUT_DIR"), "/krata.idm.internal.rs"));
pub const INTERNAL_IDM_CHANNEL: u64 = 0;
impl IdmSerializable for Event {
fn encode(&self) -> Result<Vec<u8>> {
Ok(self.encode_to_vec())
}
fn decode(bytes: &[u8]) -> Result<Self> {
Ok(<Self as prost::Message>::decode(bytes)?)
}
}
impl IdmSerializable for Request {
fn encode(&self) -> Result<Vec<u8>> {
Ok(self.encode_to_vec())
}
fn decode(bytes: &[u8]) -> Result<Self> {
Ok(<Self as prost::Message>::decode(bytes)?)
}
}
impl IdmRequest for Request {
type Response = Response;
}
impl IdmSerializable for Response {
fn encode(&self) -> Result<Vec<u8>> {
Ok(self.encode_to_vec())
}
fn decode(bytes: &[u8]) -> Result<Self> {
Ok(<Self as prost::Message>::decode(bytes)?)
}
}
pub trait AsIdmMetricValue {
fn as_metric_value(&self) -> Value;
}
impl IdmMetricNode {
pub fn structural<N: AsRef<str>>(name: N, children: Vec<IdmMetricNode>) -> IdmMetricNode {
IdmMetricNode {
impl MetricNode {
pub fn structural<N: AsRef<str>>(name: N, children: Vec<MetricNode>) -> MetricNode {
MetricNode {
name: name.as_ref().to_string(),
value: None,
format: IdmMetricFormat::Unknown.into(),
format: MetricFormat::Unknown.into(),
children,
}
}
pub fn raw_value<N: AsRef<str>, V: AsIdmMetricValue>(name: N, value: V) -> IdmMetricNode {
IdmMetricNode {
pub fn raw_value<N: AsRef<str>, V: AsIdmMetricValue>(name: N, value: V) -> MetricNode {
MetricNode {
name: name.as_ref().to_string(),
value: Some(value.as_metric_value()),
format: IdmMetricFormat::Unknown.into(),
format: MetricFormat::Unknown.into(),
children: vec![],
}
}
@ -28,9 +68,9 @@ impl IdmMetricNode {
pub fn value<N: AsRef<str>, V: AsIdmMetricValue>(
name: N,
value: V,
format: IdmMetricFormat,
) -> IdmMetricNode {
IdmMetricNode {
format: MetricFormat,
) -> MetricNode {
MetricNode {
name: name.as_ref().to_string(),
value: Some(value.as_metric_value()),
format: format.into(),

View File

@ -1,3 +1,5 @@
#[cfg(unix)]
pub mod client;
pub use crate::bus::idm as protocol;
pub mod internal;
pub mod serialize;
pub mod transport;

View File

@ -0,0 +1,10 @@
use anyhow::Result;
pub trait IdmSerializable: Sized + Clone + Send + Sync + 'static {
fn decode(bytes: &[u8]) -> Result<Self>;
fn encode(&self) -> Result<Vec<u8>>;
}
pub trait IdmRequest: IdmSerializable {
type Response: IdmSerializable;
}
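Any type satisfying these two traits can ride the IDM transport, not just the prost-generated messages. A hedged sketch for a JSON-serialized request/response pair (serde and serde_json are assumed available):

    use anyhow::Result;
    use krata::idm::serialize::{IdmRequest, IdmSerializable};
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Serialize, Deserialize)]
    pub struct EchoRequest {
        pub message: String,
    }

    #[derive(Clone, Serialize, Deserialize)]
    pub struct EchoResponse {
        pub message: String,
    }

    impl IdmSerializable for EchoRequest {
        fn decode(bytes: &[u8]) -> Result<Self> {
            Ok(serde_json::from_slice(bytes)?)
        }
        fn encode(&self) -> Result<Vec<u8>> {
            Ok(serde_json::to_vec(self)?)
        }
    }

    impl IdmSerializable for EchoResponse {
        fn decode(bytes: &[u8]) -> Result<Self> {
            Ok(serde_json::from_slice(bytes)?)
        }
        fn encode(&self) -> Result<Vec<u8>> {
            Ok(serde_json::to_vec(self)?)
        }
    }

    // Pair the request with its response type for IdmClient::send.
    impl IdmRequest for EchoRequest {
        type Response = EchoResponse;
    }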

View File

@ -0,0 +1 @@
include!(concat!(env!("OUT_DIR"), "/krata.idm.transport.rs"));

View File

@ -1,7 +1,6 @@
use once_cell::sync::Lazy;
use prost_reflect::DescriptorPool;
pub mod bus;
pub mod v1;
pub mod client;

View File

@ -1,6 +1,6 @@
[package]
name = "krata-network"
description = "Networking services for the krata hypervisor."
description = "Networking services for the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
@ -16,7 +16,7 @@ clap = { workspace = true }
env_logger = { workspace = true }
etherparse = { workspace = true }
futures = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.11" }
krata-advmac = { workspace = true }
libc = { workspace = true }
log = { workspace = true }

View File

@ -1,6 +1,6 @@
[package]
name = "krata-oci"
description = "OCI services for the krata hypervisor."
description = "OCI services for the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true

View File

@ -5,10 +5,10 @@ use env_logger::Env;
use krataoci::{
name::ImageName,
packer::{service::OciPackerService, OciPackedFormat},
progress::{OciProgress, OciProgressContext},
progress::OciProgressContext,
registry::OciPlatform,
};
use tokio::{fs, sync::mpsc::channel};
use tokio::fs;
#[tokio::main]
async fn main() -> Result<()> {
@ -22,27 +22,22 @@ async fn main() -> Result<()> {
fs::create_dir(&cache_dir).await?;
}
let (sender, mut receiver) = channel::<OciProgress>(100);
let (context, mut receiver) = OciProgressContext::create();
tokio::task::spawn(async move {
loop {
let mut progresses = Vec::new();
let _ = receiver.recv_many(&mut progresses, 100).await;
let Some(progress) = progresses.last() else {
continue;
};
if (receiver.changed().await).is_err() {
break;
}
let progress = receiver.borrow_and_update();
println!("phase {:?}", progress.phase);
for (id, layer) in &progress.layers {
println!(
"{} {:?} {} of {}",
id, layer.phase, layer.value, layer.total
)
println!("{} {:?} {:?}", id, layer.phase, layer.indication,)
}
}
});
let context = OciProgressContext::new(sender);
let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current())?;
let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current()).await?;
let packed = service
.request(image.clone(), OciPackedFormat::Squashfs, context)
.request(image.clone(), OciPackedFormat::Squashfs, false, context)
.await?;
println!(
"generated squashfs of {} to {}",

View File

@ -1,22 +1,25 @@
use crate::fetch::{OciImageFetcher, OciImageLayer, OciResolvedImage};
use crate::fetch::{OciImageFetcher, OciImageLayer, OciImageLayerReader, OciResolvedImage};
use crate::progress::OciBoundProgress;
use crate::schema::OciSchema;
use crate::vfs::{VfsNode, VfsTree};
use anyhow::{anyhow, Result};
use log::{debug, trace, warn};
use oci_spec::image::{ImageConfiguration, ImageManifest};
use oci_spec::image::{Descriptor, ImageConfiguration, ImageManifest};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::fs;
use tokio::io::AsyncRead;
use tokio_stream::StreamExt;
use tokio_tar::{Archive, Entry};
use uuid::Uuid;
pub struct OciImageAssembled {
pub digest: String,
pub manifest: ImageManifest,
pub config: ImageConfiguration,
pub descriptor: Descriptor,
pub manifest: OciSchema<ImageManifest>,
pub config: OciSchema<ImageConfiguration>,
pub vfs: Arc<VfsTree>,
pub tmp_dir: Option<PathBuf>,
}
@ -33,11 +36,12 @@ impl Drop for OciImageAssembled {
pub struct OciImageAssembler {
downloader: OciImageFetcher,
resolved: OciResolvedImage,
resolved: Option<OciResolvedImage>,
progress: OciBoundProgress,
work_dir: PathBuf,
disk_dir: PathBuf,
tmp_dir: Option<PathBuf>,
success: AtomicBool,
}
impl OciImageAssembler {
@ -81,11 +85,12 @@ impl OciImageAssembler {
Ok(OciImageAssembler {
downloader,
resolved,
resolved: Some(resolved),
progress,
work_dir,
disk_dir: target_dir,
tmp_dir,
success: AtomicBool::new(false),
})
}
@ -97,11 +102,11 @@ impl OciImageAssembler {
self.assemble_with(&layer_dir).await
}
async fn assemble_with(self, layer_dir: &Path) -> Result<OciImageAssembled> {
let local = self
.downloader
.download(self.resolved.clone(), layer_dir)
.await?;
async fn assemble_with(mut self, layer_dir: &Path) -> Result<OciImageAssembled> {
let Some(ref resolved) = self.resolved else {
return Err(anyhow!("resolved image was not available when expected"));
};
let local = self.downloader.download(resolved, layer_dir).await?;
let mut vfs = VfsTree::new();
for layer in &local.layers {
debug!(
@ -110,12 +115,14 @@ impl OciImageAssembler {
);
self.progress
.update(|progress| {
progress.extracting_layer(&layer.digest, 0, 1);
progress.start_extracting_layer(&layer.digest);
})
.await;
debug!("process layer digest={}", &layer.digest,);
let mut archive = layer.archive().await?;
let mut entries = archive.entries()?;
let mut count = 0u64;
let mut size = 0u64;
while let Some(entry) = entries.next().await {
let mut entry = entry?;
let path = entry.path()?;
@ -129,14 +136,21 @@ impl OciImageAssembler {
self.process_whiteout_entry(&mut vfs, &entry, name, layer)
.await?;
} else {
vfs.insert_tar_entry(&entry)?;
self.process_write_entry(&mut vfs, &mut entry, layer)
let reference = vfs.insert_tar_entry(&entry)?;
self.progress
.update(|progress| {
progress.extracting_layer(&layer.digest, &reference.name);
})
.await;
size += self
.process_write_entry(&mut vfs, &mut entry, layer)
.await?;
count += 1;
}
}
self.progress
.update(|progress| {
progress.extracted_layer(&layer.digest);
progress.extracted_layer(&layer.digest, count, size);
})
.await;
}
@ -145,19 +159,27 @@ impl OciImageAssembler {
fs::remove_file(&layer.path).await?;
}
}
Ok(OciImageAssembled {
let Some(resolved) = self.resolved.take() else {
return Err(anyhow!("resolved image was not available when expected"));
};
let assembled = OciImageAssembled {
vfs: Arc::new(vfs),
digest: self.resolved.digest,
manifest: self.resolved.manifest,
descriptor: resolved.descriptor,
digest: resolved.digest,
manifest: resolved.manifest,
config: local.config,
tmp_dir: self.tmp_dir,
})
tmp_dir: self.tmp_dir.clone(),
};
self.success.store(true, Ordering::Release);
Ok(assembled)
}
async fn process_whiteout_entry(
&self,
vfs: &mut VfsTree,
entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
entry: &Entry<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>>,
name: &str,
layer: &OciImageLayer,
) -> Result<()> {
@ -198,11 +220,11 @@ impl OciImageAssembler {
async fn process_write_entry(
&self,
vfs: &mut VfsTree,
entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
entry: &mut Entry<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>>,
layer: &OciImageLayer,
) -> Result<()> {
) -> Result<u64> {
if !entry.header().entry_type().is_file() {
return Ok(());
return Ok(0);
}
trace!(
"unpack entry layer={} path={:?} type={:?}",
@ -218,7 +240,19 @@ impl OciImageAssembler {
.await?
.ok_or(anyhow!("unpack did not return a path"))?;
vfs.set_disk_path(&entry.path()?, &path)?;
Ok(())
Ok(entry.header().size()?)
}
}
impl Drop for OciImageAssembler {
fn drop(&mut self) {
if !self.success.load(Ordering::Acquire) {
if let Some(tmp_dir) = self.tmp_dir.clone() {
tokio::task::spawn(async move {
let _ = fs::remove_dir_all(tmp_dir).await;
});
}
}
}
}

View File

@ -1,4 +1,7 @@
use crate::progress::{OciBoundProgress, OciProgressPhase};
use crate::{
progress::{OciBoundProgress, OciProgressPhase},
schema::OciSchema,
};
use super::{
name::ImageName,
@ -6,6 +9,9 @@ use super::{
};
use std::{
fmt::Debug,
io::SeekFrom,
os::unix::fs::MetadataExt,
path::{Path, PathBuf},
pin::Pin,
};
@ -14,12 +20,13 @@ use anyhow::{anyhow, Result};
use async_compression::tokio::bufread::{GzipDecoder, ZstdDecoder};
use log::debug;
use oci_spec::image::{
Descriptor, ImageConfiguration, ImageIndex, ImageManifest, MediaType, ToDockerV2S2,
Descriptor, DescriptorBuilder, ImageConfiguration, ImageIndex, ImageManifest, MediaType,
ToDockerV2S2,
};
use serde::de::DeserializeOwned;
use tokio::{
fs::File,
io::{AsyncRead, AsyncReadExt, BufReader, BufWriter},
fs::{self, File},
io::{AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader, BufWriter},
};
use tokio_stream::StreamExt;
use tokio_tar::Archive;
@ -39,16 +46,43 @@ pub enum OciImageLayerCompression {
#[derive(Clone, Debug)]
pub struct OciImageLayer {
pub metadata: Descriptor,
pub path: PathBuf,
pub digest: String,
pub compression: OciImageLayerCompression,
}
#[async_trait::async_trait]
pub trait OciImageLayerReader: AsyncRead + Sync {
async fn position(&mut self) -> Result<u64>;
}
#[async_trait::async_trait]
impl OciImageLayerReader for BufReader<File> {
async fn position(&mut self) -> Result<u64> {
Ok(self.seek(SeekFrom::Current(0)).await?)
}
}
#[async_trait::async_trait]
impl OciImageLayerReader for GzipDecoder<BufReader<File>> {
async fn position(&mut self) -> Result<u64> {
self.get_mut().position().await
}
}
#[async_trait::async_trait]
impl OciImageLayerReader for ZstdDecoder<BufReader<File>> {
async fn position(&mut self) -> Result<u64> {
self.get_mut().position().await
}
}
impl OciImageLayer {
pub async fn decompress(&self) -> Result<Pin<Box<dyn AsyncRead + Send>>> {
pub async fn decompress(&self) -> Result<Pin<Box<dyn OciImageLayerReader + Send>>> {
let file = File::open(&self.path).await?;
let reader = BufReader::new(file);
let reader: Pin<Box<dyn AsyncRead + Send>> = match self.compression {
let reader: Pin<Box<dyn OciImageLayerReader + Send>> = match self.compression {
OciImageLayerCompression::None => Box::pin(reader),
OciImageLayerCompression::Gzip => Box::pin(GzipDecoder::new(reader)),
OciImageLayerCompression::Zstd => Box::pin(ZstdDecoder::new(reader)),
@ -56,7 +90,7 @@ impl OciImageLayer {
Ok(reader)
}
pub async fn archive(&self) -> Result<Archive<Pin<Box<dyn AsyncRead + Send>>>> {
pub async fn archive(&self) -> Result<Archive<Pin<Box<dyn OciImageLayerReader + Send>>>> {
let decompress = self.decompress().await?;
Ok(Archive::new(decompress))
}
@ -66,13 +100,14 @@ impl OciImageLayer {
pub struct OciResolvedImage {
pub name: ImageName,
pub digest: String,
pub manifest: ImageManifest,
pub descriptor: Descriptor,
pub manifest: OciSchema<ImageManifest>,
}
#[derive(Clone, Debug)]
pub struct OciLocalImage {
pub image: OciResolvedImage,
pub config: ImageConfiguration,
pub config: OciSchema<ImageConfiguration>,
pub layers: Vec<OciImageLayer>,
}
@ -89,10 +124,10 @@ impl OciImageFetcher {
}
}
async fn load_seed_json_blob<T: DeserializeOwned>(
async fn load_seed_json_blob<T: Clone + Debug + DeserializeOwned>(
&self,
descriptor: &Descriptor,
) -> Result<Option<T>> {
) -> Result<Option<OciSchema<T>>> {
let digest = descriptor.digest();
let Some((digest_type, digest_content)) = digest.split_once(':') else {
return Err(anyhow!("digest content was not properly formatted"));
@ -101,7 +136,10 @@ impl OciImageFetcher {
self.load_seed_json(&want).await
}
async fn load_seed_json<T: DeserializeOwned>(&self, want: &str) -> Result<Option<T>> {
async fn load_seed_json<T: Clone + Debug + DeserializeOwned>(
&self,
want: &str,
) -> Result<Option<OciSchema<T>>> {
let Some(ref seed) = self.seed else {
return Ok(None);
};
@ -113,10 +151,10 @@ impl OciImageFetcher {
let mut entry = entry?;
let path = String::from_utf8(entry.path_bytes().to_vec())?;
if path == want {
let mut content = String::new();
entry.read_to_string(&mut content).await?;
let data = serde_json::from_str::<T>(&content)?;
return Ok(Some(data));
let mut content = Vec::new();
entry.read_to_end(&mut content).await?;
let item = serde_json::from_slice::<T>(&content)?;
return Ok(Some(OciSchema::new(content, item)));
}
}
Ok(None)
@ -154,7 +192,7 @@ impl OciImageFetcher {
if let Some(index) = self.load_seed_json::<ImageIndex>("index.json").await? {
let mut found: Option<&Descriptor> = None;
for manifest in index.manifests() {
for manifest in index.item().manifests() {
let Some(annotations) = manifest.annotations() else {
continue;
};
@ -179,6 +217,13 @@ impl OciImageFetcher {
continue;
}
}
if let Some(ref digest) = image.digest {
if digest != manifest.digest() {
continue;
}
}
found = Some(manifest);
break;
}
@ -192,6 +237,7 @@ impl OciImageFetcher {
);
return Ok(OciResolvedImage {
name: image,
descriptor: found.clone(),
digest: found.digest().clone(),
manifest,
});
@ -200,11 +246,20 @@ impl OciImageFetcher {
}
let mut client = OciRegistryClient::new(image.registry_url()?, self.platform.clone())?;
let (manifest, digest) = client
.get_manifest_with_digest(&image.name, &image.reference)
let (manifest, descriptor, digest) = client
.get_manifest_with_digest(&image.name, image.reference.as_ref(), image.digest.as_ref())
.await?;
let descriptor = descriptor.unwrap_or_else(|| {
DescriptorBuilder::default()
.media_type(MediaType::ImageManifest)
.size(manifest.raw().len() as i64)
.digest(digest.clone())
.build()
.unwrap()
});
Ok(OciResolvedImage {
name: image,
descriptor,
digest,
manifest,
})
@ -212,41 +267,44 @@ impl OciImageFetcher {
pub async fn download(
&self,
image: OciResolvedImage,
image: &OciResolvedImage,
layer_dir: &Path,
) -> Result<OciLocalImage> {
let config: ImageConfiguration;
let config: OciSchema<ImageConfiguration>;
self.progress
.update(|progress| {
progress.phase = OciProgressPhase::ConfigAcquire;
progress.phase = OciProgressPhase::ConfigDownload;
})
.await;
let mut client = OciRegistryClient::new(image.name.registry_url()?, self.platform.clone())?;
if let Some(seeded) = self
.load_seed_json_blob::<ImageConfiguration>(image.manifest.config())
.load_seed_json_blob::<ImageConfiguration>(image.manifest.item().config())
.await?
{
config = seeded;
} else {
let config_bytes = client
.get_blob(&image.name.name, image.manifest.config())
.get_blob(&image.name.name, image.manifest.item().config())
.await?;
config = serde_json::from_slice(&config_bytes)?;
config = OciSchema::new(
config_bytes.to_vec(),
serde_json::from_slice(&config_bytes)?,
);
}
self.progress
.update(|progress| {
progress.phase = OciProgressPhase::LayerAcquire;
progress.phase = OciProgressPhase::LayerDownload;
for layer in image.manifest.layers() {
progress.add_layer(layer.digest(), layer.size() as usize);
for layer in image.manifest.item().layers() {
progress.add_layer(layer.digest());
}
})
.await;
let mut layers = Vec::new();
for layer in image.manifest.layers() {
for layer in image.manifest.item().layers() {
self.progress
.update(|progress| {
progress.downloading_layer(layer.digest(), 0, layer.size() as usize);
progress.downloading_layer(layer.digest(), 0, layer.size() as u64);
})
.await;
layers.push(
@ -255,12 +313,12 @@ impl OciImageFetcher {
);
self.progress
.update(|progress| {
progress.downloaded_layer(layer.digest());
progress.downloaded_layer(layer.digest(), layer.size() as u64);
})
.await;
}
Ok(OciLocalImage {
image,
image: image.clone(),
config,
layers,
})
@ -294,6 +352,12 @@ impl OciImageFetcher {
}
}
let metadata = fs::metadata(&layer_path).await?;
if layer.size() as u64 != metadata.size() {
return Err(anyhow!("layer size differs from size in manifest",));
}
let mut media_type = layer.media_type().clone();
// docker layer compatibility
@ -308,6 +372,7 @@ impl OciImageFetcher {
other => return Err(anyhow!("found layer with unknown media type: {}", other)),
};
Ok(OciImageLayer {
metadata: layer.clone(),
path: layer_path,
digest: layer.digest().clone(),
compression,


@ -4,4 +4,5 @@ pub mod name;
pub mod packer;
pub mod progress;
pub mod registry;
pub mod schema;
pub mod vfs;


@ -2,27 +2,39 @@ use anyhow::Result;
use std::fmt;
use url::Url;
const DOCKER_HUB_MIRROR: &str = "mirror.gcr.io";
const DEFAULT_IMAGE_TAG: &str = "latest";
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ImageName {
pub hostname: String,
pub port: Option<u16>,
pub name: String,
pub reference: String,
pub reference: Option<String>,
pub digest: Option<String>,
}
impl fmt::Display for ImageName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(port) = self.port {
write!(
f,
"{}:{}/{}:{}",
self.hostname, port, self.name, self.reference
)
let mut suffix = String::new();
if let Some(ref reference) = self.reference {
suffix.push(':');
suffix.push_str(reference);
}
if let Some(ref digest) = self.digest {
suffix.push('@');
suffix.push_str(digest);
}
if ImageName::DOCKER_HUB_MIRROR == self.hostname && self.port.is_none() {
if self.name.starts_with("library/") {
write!(f, "{}{}", &self.name[8..], suffix)
} else {
write!(f, "{}{}", self.name, suffix)
}
} else if let Some(port) = self.port {
write!(f, "{}:{}/{}{}", self.hostname, port, self.name, suffix)
} else {
write!(f, "{}/{}:{}", self.hostname, self.name, self.reference)
write!(f, "{}/{}{}", self.hostname, self.name, suffix)
}
}
}
@ -35,13 +47,21 @@ impl Default for ImageName {
}
impl ImageName {
pub const DOCKER_HUB_MIRROR: &'static str = "mirror.gcr.io";
pub const DEFAULT_IMAGE_TAG: &'static str = "latest";
pub fn parse(name: &str) -> Result<Self> {
let full_name = name.to_string();
let name = full_name.clone();
let (mut hostname, mut name) = name
.split_once('/')
.map(|x| (x.0.to_string(), x.1.to_string()))
.unwrap_or_else(|| (DOCKER_HUB_MIRROR.to_string(), format!("library/{}", name)));
.unwrap_or_else(|| {
(
ImageName::DOCKER_HUB_MIRROR.to_string(),
format!("library/{}", name),
)
});
// heuristic to find any docker hub image formats
// that may be in the hostname format. for example:
@ -49,7 +69,7 @@ impl ImageName {
// and neither will abc/hello/xyz:latest
if !hostname.contains('.') && full_name.chars().filter(|x| *x == '/').count() == 1 {
name = format!("{}/{}", hostname, name);
hostname = DOCKER_HUB_MIRROR.to_string();
hostname = ImageName::DOCKER_HUB_MIRROR.to_string();
}
let (hostname, port) = if let Some((hostname, port)) = hostname
@ -60,15 +80,54 @@ impl ImageName {
} else {
(hostname, None)
};
let (name, reference) = name
.split_once(':')
.map(|x| (x.0.to_string(), x.1.to_string()))
.unwrap_or((name.to_string(), DEFAULT_IMAGE_TAG.to_string()));
let name_has_digest = if name.contains('@') {
let digest_start = name.chars().position(|c| c == '@');
let ref_start = name.chars().position(|c| c == ':');
if let (Some(digest_start), Some(ref_start)) = (digest_start, ref_start) {
digest_start < ref_start
} else {
true
}
} else {
false
};
let (name, digest) = if name_has_digest {
name.split_once('@')
.map(|(name, digest)| (name.to_string(), Some(digest.to_string())))
.unwrap_or_else(|| (name, None))
} else {
(name, None)
};
let (name, reference) = if name.contains(':') {
name.split_once(':')
.map(|(name, reference)| (name.to_string(), Some(reference.to_string())))
.unwrap_or((name, None))
} else {
(name, None)
};
let (reference, digest) = if let Some(reference) = reference {
if let Some(digest) = digest {
(Some(reference), Some(digest))
} else {
reference
.split_once('@')
.map(|(reff, digest)| (Some(reff.to_string()), Some(digest.to_string())))
.unwrap_or_else(|| (Some(reference), None))
}
} else {
(None, digest)
};
Ok(ImageName {
hostname,
port,
name,
reference,
digest,
})
}
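
Illustrative note (not part of the diff): with the reference and digest now both optional, the parser accepts plain names, tags, digests, or tag-plus-digest. A minimal sketch of the expected behaviour, using hypothetical image names:

fn parse_examples() -> anyhow::Result<()> {
    // Bare Docker Hub names route through the mirror and keep no tag;
    // "latest" is only applied later, when the manifest is fetched.
    let plain = ImageName::parse("alpine")?;
    assert_eq!(plain.hostname, "mirror.gcr.io");
    assert_eq!(plain.name, "library/alpine");
    assert_eq!(plain.reference, None);

    // A digest-pinned reference keeps both the tag and the digest.
    let pinned = ImageName::parse("ghcr.io/example/app:v1@sha256:deadbeef")?;
    assert_eq!(pinned.reference.as_deref(), Some("v1"));
    assert_eq!(pinned.digest.as_deref(), Some("sha256:deadbeef"));
    Ok(())
}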


@ -1,18 +1,22 @@
use std::{path::Path, process::Stdio, sync::Arc};
use std::{os::unix::fs::MetadataExt, path::Path, process::Stdio, sync::Arc};
use super::OciPackedFormat;
use crate::{
progress::{OciBoundProgress, OciProgressPhase},
vfs::VfsTree,
};
use crate::{progress::OciBoundProgress, vfs::VfsTree};
use anyhow::{anyhow, Result};
use log::warn;
use tokio::{pin, process::Command, select};
use tokio::{
fs::{self, File},
io::BufWriter,
pin,
process::{Child, Command},
select,
};
#[derive(Debug, Clone, Copy)]
pub enum OciPackerBackendType {
MkSquashfs,
MkfsErofs,
Tar,
}
impl OciPackerBackendType {
@ -20,6 +24,7 @@ impl OciPackerBackendType {
match self {
OciPackerBackendType::MkSquashfs => OciPackedFormat::Squashfs,
OciPackerBackendType::MkfsErofs => OciPackedFormat::Erofs,
OciPackerBackendType::Tar => OciPackedFormat::Tar,
}
}
@ -31,6 +36,7 @@ impl OciPackerBackendType {
OciPackerBackendType::MkfsErofs => {
Box::new(OciPackerMkfsErofs {}) as Box<dyn OciPackerBackend>
}
OciPackerBackendType::Tar => Box::new(OciPackerTar {}) as Box<dyn OciPackerBackend>,
}
}
}
@ -47,13 +53,11 @@ impl OciPackerBackend for OciPackerMkSquashfs {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
progress.start_packing();
})
.await;
let mut child = Command::new("mksquashfs")
let child = Command::new("mksquashfs")
.arg("-")
.arg(file)
.arg("-comp")
@ -63,7 +67,9 @@ impl OciPackerBackend for OciPackerMkSquashfs {
.stderr(Stdio::null())
.stdout(Stdio::null())
.spawn()?;
let mut child = ChildProcessKillGuard(child);
let stdin = child
.0
.stdin
.take()
.ok_or(anyhow!("unable to acquire stdin stream"))?;
@ -74,7 +80,7 @@ impl OciPackerBackend for OciPackerMkSquashfs {
}
Ok(())
}));
let wait = child.wait();
let wait = child.0.wait();
pin!(wait);
let status_result = loop {
if let Some(inner) = writer.as_mut() {
@ -110,12 +116,9 @@ impl OciPackerBackend for OciPackerMkSquashfs {
status.code().unwrap()
))
} else {
let metadata = fs::metadata(&file).await?;
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
})
.update(|progress| progress.complete(metadata.size()))
.await;
Ok(())
}
@ -126,32 +129,32 @@ pub struct OciPackerMkfsErofs {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerMkfsErofs {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, path: &Path) -> Result<()> {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 0;
progress.start_packing();
})
.await;
let mut child = Command::new("mkfs.erofs")
let child = Command::new("mkfs.erofs")
.arg("-L")
.arg("root")
.arg("--tar=-")
.arg(path)
.arg(file)
.stdin(Stdio::piped())
.stderr(Stdio::null())
.stdout(Stdio::null())
.spawn()?;
let mut child = ChildProcessKillGuard(child);
let stdin = child
.0
.stdin
.take()
.ok_or(anyhow!("unable to acquire stdin stream"))?;
let mut writer = Some(tokio::task::spawn(
async move { vfs.write_to_tar(stdin).await },
));
let wait = child.wait();
let wait = child.0.wait();
pin!(wait);
let status_result = loop {
if let Some(inner) = writer.as_mut() {
@ -188,14 +191,46 @@ impl OciPackerBackend for OciPackerMkfsErofs {
status.code().unwrap()
))
} else {
let metadata = fs::metadata(&file).await?;
progress
.update(|progress| {
progress.phase = OciProgressPhase::Packing;
progress.total = 1;
progress.value = 1;
progress.complete(metadata.size());
})
.await;
Ok(())
}
}
}
pub struct OciPackerTar {}
#[async_trait::async_trait]
impl OciPackerBackend for OciPackerTar {
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
progress
.update(|progress| {
progress.start_packing();
})
.await;
let output = File::create(file).await?;
let output = BufWriter::new(output);
vfs.write_to_tar(output).await?;
let metadata = fs::metadata(file).await?;
progress
.update(|progress| {
progress.complete(metadata.size());
})
.await;
Ok(())
}
}
struct ChildProcessKillGuard(Child);
impl Drop for ChildProcessKillGuard {
fn drop(&mut self) {
let _ = self.0.start_kill();
}
}
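
Illustrative note (not part of the diff): the kill guard means that if the packing future is dropped (for example when a packer task is aborted), the spawned mksquashfs or mkfs.erofs process is killed rather than leaked. A minimal sketch of the pattern, with a hypothetical stand-in command:

async fn run_guarded() -> anyhow::Result<()> {
    // Hypothetical long-running child standing in for mksquashfs / mkfs.erofs.
    let child = tokio::process::Command::new("sleep").arg("60").spawn()?;
    let mut child = ChildProcessKillGuard(child);
    // If this future is cancelled before wait() completes, Drop calls
    // start_kill() on the child so it does not outlive the task.
    let status = child.0.wait().await?;
    println!("packer exited with {status}");
    Ok(())
}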


@ -1,66 +1,121 @@
use crate::packer::{OciImagePacked, OciPackedFormat};
use crate::{
name::ImageName,
packer::{OciPackedFormat, OciPackedImage},
schema::OciSchema,
};
use anyhow::Result;
use log::debug;
use oci_spec::image::{ImageConfiguration, ImageManifest};
use std::path::{Path, PathBuf};
use tokio::fs;
use log::{debug, error};
use oci_spec::image::{
Descriptor, ImageConfiguration, ImageIndex, ImageIndexBuilder, ImageManifest, MediaType,
ANNOTATION_REF_NAME,
};
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use tokio::{fs, sync::RwLock};
#[derive(Clone)]
pub struct OciPackerCache {
cache_dir: PathBuf,
index: Arc<RwLock<ImageIndex>>,
}
const ANNOTATION_IMAGE_NAME: &str = "io.containerd.image.name";
const ANNOTATION_OCI_PACKER_FORMAT: &str = "dev.krata.oci.packer.format";
impl OciPackerCache {
pub fn new(cache_dir: &Path) -> Result<OciPackerCache> {
Ok(OciPackerCache {
pub async fn new(cache_dir: &Path) -> Result<OciPackerCache> {
let index = ImageIndexBuilder::default()
.schema_version(2u32)
.media_type(MediaType::ImageIndex)
.manifests(Vec::new())
.build()?;
let cache = OciPackerCache {
cache_dir: cache_dir.to_path_buf(),
})
index: Arc::new(RwLock::new(index)),
};
{
let mut mutex = cache.index.write().await;
*mutex = cache.load_index().await?;
}
Ok(cache)
}
pub async fn list(&self) -> Result<Vec<Descriptor>> {
let index = self.index.read().await;
Ok(index.manifests().clone())
}
pub async fn recall(
&self,
name: ImageName,
digest: &str,
format: OciPackedFormat,
) -> Result<Option<OciImagePacked>> {
) -> Result<Option<OciPackedImage>> {
let index = self.index.read().await;
let mut descriptor: Option<Descriptor> = None;
for manifest in index.manifests() {
if manifest.digest() == digest
&& manifest
.annotations()
.as_ref()
.and_then(|x| x.get(ANNOTATION_OCI_PACKER_FORMAT))
.map(|x| x.as_str())
== Some(format.extension())
{
descriptor = Some(manifest.clone());
break;
}
}
let Some(descriptor) = descriptor else {
return Ok(None);
};
let mut fs_path = self.cache_dir.clone();
let mut config_path = self.cache_dir.clone();
let mut manifest_path = self.cache_dir.clone();
fs_path.push(format!("{}.{}", digest, format.extension()));
manifest_path.push(format!("{}.manifest.json", digest));
config_path.push(format!("{}.config.json", digest));
Ok(
if fs_path.exists() && manifest_path.exists() && config_path.exists() {
let image_metadata = fs::metadata(&fs_path).await?;
let manifest_metadata = fs::metadata(&manifest_path).await?;
let config_metadata = fs::metadata(&config_path).await?;
if image_metadata.is_file()
&& manifest_metadata.is_file()
&& config_metadata.is_file()
{
let manifest_text = fs::read_to_string(&manifest_path).await?;
let manifest: ImageManifest = serde_json::from_str(&manifest_text)?;
let config_text = fs::read_to_string(&config_path).await?;
let config: ImageConfiguration = serde_json::from_str(&config_text)?;
debug!("cache hit digest={}", digest);
Some(OciImagePacked::new(
digest.to_string(),
fs_path.clone(),
format,
config,
manifest,
))
} else {
None
}
if fs_path.exists() && manifest_path.exists() && config_path.exists() {
let image_metadata = fs::metadata(&fs_path).await?;
let manifest_metadata = fs::metadata(&manifest_path).await?;
let config_metadata = fs::metadata(&config_path).await?;
if image_metadata.is_file() && manifest_metadata.is_file() && config_metadata.is_file()
{
let manifest_bytes = fs::read(&manifest_path).await?;
let manifest: ImageManifest = serde_json::from_slice(&manifest_bytes)?;
let config_bytes = fs::read(&config_path).await?;
let config: ImageConfiguration = serde_json::from_slice(&config_bytes)?;
debug!("cache hit digest={}", digest);
Ok(Some(OciPackedImage::new(
name,
digest.to_string(),
fs_path.clone(),
format,
descriptor,
OciSchema::new(config_bytes, config),
OciSchema::new(manifest_bytes, manifest),
)))
} else {
debug!("cache miss digest={}", digest);
None
},
)
Ok(None)
}
} else {
debug!("cache miss digest={}", digest);
Ok(None)
}
}
pub async fn store(&self, packed: OciImagePacked) -> Result<OciImagePacked> {
pub async fn store(&self, packed: OciPackedImage) -> Result<OciPackedImage> {
let mut index = self.index.write().await;
let mut manifests = index.manifests().clone();
debug!("cache store digest={}", packed.digest);
let mut fs_path = self.cache_dir.clone();
let mut manifest_path = self.cache_dir.clone();
@ -68,17 +123,98 @@ impl OciPackerCache {
fs_path.push(format!("{}.{}", packed.digest, packed.format.extension()));
manifest_path.push(format!("{}.manifest.json", packed.digest));
config_path.push(format!("{}.config.json", packed.digest));
fs::copy(&packed.path, &fs_path).await?;
let manifest_text = serde_json::to_string_pretty(&packed.manifest)?;
fs::write(&manifest_path, manifest_text).await?;
let config_text = serde_json::to_string_pretty(&packed.config)?;
fs::write(&config_path, config_text).await?;
Ok(OciImagePacked::new(
if fs::rename(&packed.path, &fs_path).await.is_err() {
fs::copy(&packed.path, &fs_path).await?;
fs::remove_file(&packed.path).await?;
}
fs::write(&config_path, packed.config.raw()).await?;
fs::write(&manifest_path, packed.manifest.raw()).await?;
manifests.retain(|item| {
if item.digest() != &packed.digest {
return true;
}
let Some(format) = item
.annotations()
.as_ref()
.and_then(|x| x.get(ANNOTATION_OCI_PACKER_FORMAT))
.map(|x| x.as_str())
else {
return true;
};
if format != packed.format.extension() {
return true;
}
false
});
let mut descriptor = packed.descriptor.clone();
let mut annotations = descriptor.annotations().clone().unwrap_or_default();
annotations.insert(
ANNOTATION_OCI_PACKER_FORMAT.to_string(),
packed.format.extension().to_string(),
);
let image_name = packed.name.to_string();
annotations.insert(ANNOTATION_IMAGE_NAME.to_string(), image_name);
let image_ref = packed.name.reference.clone();
if let Some(image_ref) = image_ref {
annotations.insert(ANNOTATION_REF_NAME.to_string(), image_ref);
}
descriptor.set_annotations(Some(annotations));
manifests.push(descriptor.clone());
index.set_manifests(manifests);
self.save_index(&index).await?;
let packed = OciPackedImage::new(
packed.name,
packed.digest,
fs_path.clone(),
packed.format,
descriptor,
packed.config,
packed.manifest,
))
);
Ok(packed)
}
async fn save_empty_index(&self) -> Result<ImageIndex> {
let index = ImageIndexBuilder::default()
.schema_version(2u32)
.media_type(MediaType::ImageIndex)
.manifests(Vec::new())
.build()?;
self.save_index(&index).await?;
Ok(index)
}
async fn load_index(&self) -> Result<ImageIndex> {
let mut index_path = self.cache_dir.clone();
index_path.push("index.json");
if !index_path.exists() {
self.save_empty_index().await?;
}
let content = fs::read_to_string(&index_path).await?;
let index = match serde_json::from_str::<ImageIndex>(&content) {
Ok(index) => index,
Err(error) => {
error!("image index was corrupted, creating a new one: {}", error);
self.save_empty_index().await?
}
};
Ok(index)
}
async fn save_index(&self, index: &ImageIndex) -> Result<()> {
let mut encoded = serde_json::to_string_pretty(index)?;
encoded.push('\n');
let mut index_path = self.cache_dir.clone();
index_path.push("index.json");
fs::write(&index_path, encoded).await?;
Ok(())
}
}


@ -1,17 +1,20 @@
use std::path::PathBuf;
use crate::{name::ImageName, schema::OciSchema};
use self::backend::OciPackerBackendType;
use oci_spec::image::{ImageConfiguration, ImageManifest};
use oci_spec::image::{Descriptor, ImageConfiguration, ImageManifest};
pub mod backend;
pub mod cache;
pub mod service;
#[derive(Debug, Default, Clone, Copy)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash)]
pub enum OciPackedFormat {
#[default]
Squashfs,
Erofs,
Tar,
}
impl OciPackedFormat {
@ -19,6 +22,7 @@ impl OciPackedFormat {
match self {
OciPackedFormat::Squashfs => "squashfs",
OciPackedFormat::Erofs => "erofs",
OciPackedFormat::Tar => "tar",
}
}
@ -26,31 +30,38 @@ impl OciPackedFormat {
match self {
OciPackedFormat::Squashfs => OciPackerBackendType::MkSquashfs,
OciPackedFormat::Erofs => OciPackerBackendType::MkfsErofs,
OciPackedFormat::Tar => OciPackerBackendType::Tar,
}
}
}
#[derive(Clone)]
pub struct OciImagePacked {
pub struct OciPackedImage {
pub name: ImageName,
pub digest: String,
pub path: PathBuf,
pub format: OciPackedFormat,
pub config: ImageConfiguration,
pub manifest: ImageManifest,
pub descriptor: Descriptor,
pub config: OciSchema<ImageConfiguration>,
pub manifest: OciSchema<ImageManifest>,
}
impl OciImagePacked {
impl OciPackedImage {
pub fn new(
name: ImageName,
digest: String,
path: PathBuf,
format: OciPackedFormat,
config: ImageConfiguration,
manifest: ImageManifest,
) -> OciImagePacked {
OciImagePacked {
descriptor: Descriptor,
config: OciSchema<ImageConfiguration>,
manifest: OciSchema<ImageManifest>,
) -> OciPackedImage {
OciPackedImage {
name,
digest,
path,
format,
descriptor,
config,
manifest,
}


@ -1,58 +1,201 @@
use std::path::{Path, PathBuf};
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Display,
path::{Path, PathBuf},
sync::Arc,
};
use anyhow::{anyhow, Result};
use oci_spec::image::Descriptor;
use tokio::{
sync::{watch, Mutex},
task::JoinHandle,
};
use crate::{
assemble::OciImageAssembler,
fetch::OciImageFetcher,
fetch::{OciImageFetcher, OciResolvedImage},
name::ImageName,
progress::{OciBoundProgress, OciProgress, OciProgressContext},
registry::OciPlatform,
};
use super::{cache::OciPackerCache, OciImagePacked, OciPackedFormat};
use log::{error, info, warn};
use super::{cache::OciPackerCache, OciPackedFormat, OciPackedImage};
pub struct OciPackerTask {
progress: OciBoundProgress,
watch: watch::Sender<Option<Result<OciPackedImage>>>,
task: JoinHandle<()>,
}
#[derive(Clone)]
pub struct OciPackerService {
seed: Option<PathBuf>,
platform: OciPlatform,
cache: OciPackerCache,
tasks: Arc<Mutex<HashMap<OciPackerTaskKey, OciPackerTask>>>,
}
impl OciPackerService {
pub fn new(
pub async fn new(
seed: Option<PathBuf>,
cache_dir: &Path,
platform: OciPlatform,
) -> Result<OciPackerService> {
Ok(OciPackerService {
seed,
cache: OciPackerCache::new(cache_dir)?,
cache: OciPackerCache::new(cache_dir).await?,
platform,
tasks: Arc::new(Mutex::new(HashMap::new())),
})
}
pub async fn list(&self) -> Result<Vec<Descriptor>> {
self.cache.list().await
}
pub async fn recall(
&self,
digest: &str,
format: OciPackedFormat,
) -> Result<Option<OciImagePacked>> {
self.cache.recall(digest, format).await
) -> Result<Option<OciPackedImage>> {
if digest.contains('/') || digest.contains('\\') || digest.contains("..") {
return Ok(None);
}
self.cache
.recall(ImageName::parse("cached:latest")?, digest, format)
.await
}
pub async fn request(
&self,
name: ImageName,
format: OciPackedFormat,
overwrite: bool,
progress_context: OciProgressContext,
) -> Result<OciImagePacked> {
) -> Result<OciPackedImage> {
let progress = OciProgress::new();
let progress = OciBoundProgress::new(progress_context.clone(), progress);
let fetcher =
OciImageFetcher::new(self.seed.clone(), self.platform.clone(), progress.clone());
let resolved = fetcher.resolve(name).await?;
if let Some(cached) = self.cache.recall(&resolved.digest, format).await? {
return Ok(cached);
let resolved = fetcher.resolve(name.clone()).await?;
let key = OciPackerTaskKey {
digest: resolved.digest.clone(),
format,
};
let (progress_copy_task, mut receiver) = match self.tasks.lock().await.entry(key.clone()) {
Entry::Occupied(entry) => {
let entry = entry.get();
(
Some(entry.progress.also_update(progress_context).await),
entry.watch.subscribe(),
)
}
Entry::Vacant(entry) => {
let task = self
.clone()
.launch(
name,
key.clone(),
format,
overwrite,
resolved,
fetcher,
progress.clone(),
)
.await;
let (watch, receiver) = watch::channel(None);
let task = OciPackerTask {
progress: progress.clone(),
task,
watch,
};
entry.insert(task);
(None, receiver)
}
};
let _progress_task_guard = scopeguard::guard(progress_copy_task, |task| {
if let Some(task) = task {
task.abort();
}
});
let _task_cancel_guard = scopeguard::guard(self.clone(), |service| {
service.maybe_cancel_task(key);
});
loop {
receiver.changed().await?;
let current = receiver.borrow_and_update();
if current.is_some() {
return current
.as_ref()
.map(|x| x.as_ref().map_err(|err| anyhow!("{}", err)).cloned())
.unwrap();
}
}
}
#[allow(clippy::too_many_arguments)]
async fn launch(
self,
name: ImageName,
key: OciPackerTaskKey,
format: OciPackedFormat,
overwrite: bool,
resolved: OciResolvedImage,
fetcher: OciImageFetcher,
progress: OciBoundProgress,
) -> JoinHandle<()> {
info!("started packer task {}", key);
tokio::task::spawn(async move {
let _task_drop_guard =
scopeguard::guard((key.clone(), self.clone()), |(key, service)| {
service.ensure_task_gone(key);
});
if let Err(error) = self
.task(
name,
key.clone(),
format,
overwrite,
resolved,
fetcher,
progress,
)
.await
{
self.finish(&key, Err(error)).await;
}
})
}
#[allow(clippy::too_many_arguments)]
async fn task(
&self,
name: ImageName,
key: OciPackerTaskKey,
format: OciPackedFormat,
overwrite: bool,
resolved: OciResolvedImage,
fetcher: OciImageFetcher,
progress: OciBoundProgress,
) -> Result<()> {
if !overwrite {
if let Some(cached) = self
.cache
.recall(name.clone(), &resolved.digest, format)
.await?
{
self.finish(&key, Ok(cached)).await;
return Ok(());
}
}
let assembler =
OciImageAssembler::new(fetcher, resolved, progress.clone(), None, None).await?;
@ -67,15 +210,69 @@ impl OciPackerService {
packer
.pack(progress, assembled.vfs.clone(), &target)
.await?;
let packed = OciImagePacked::new(
let packed = OciPackedImage::new(
name,
assembled.digest.clone(),
file,
format,
assembled.descriptor.clone(),
assembled.config.clone(),
assembled.manifest.clone(),
);
let packed = self.cache.store(packed).await?;
Ok(packed)
self.finish(&key, Ok(packed)).await;
Ok(())
}
async fn finish(&self, key: &OciPackerTaskKey, result: Result<OciPackedImage>) {
let Some(task) = self.tasks.lock().await.remove(key) else {
error!("packer task {} was not found when task completed", key);
return;
};
match result.as_ref() {
Ok(_) => {
info!("completed packer task {}", key);
}
Err(err) => {
warn!("packer task {} failed: {}", key, err);
}
}
task.watch.send_replace(Some(result));
}
fn maybe_cancel_task(self, key: OciPackerTaskKey) {
tokio::task::spawn(async move {
let tasks = self.tasks.lock().await;
if let Some(task) = tasks.get(&key) {
if task.watch.is_closed() {
task.task.abort();
}
}
});
}
fn ensure_task_gone(self, key: OciPackerTaskKey) {
tokio::task::spawn(async move {
let mut tasks = self.tasks.lock().await;
if let Some(task) = tasks.remove(&key) {
warn!("aborted packer task {}", key);
task.watch.send_replace(Some(Err(anyhow!("task aborted"))));
}
});
}
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
struct OciPackerTaskKey {
digest: String,
format: OciPackedFormat,
}
impl Display for OciPackerTaskKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("{}:{}", self.digest, self.format.extension()))
}
}
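
Illustrative note (not part of the diff): pack requests are now deduplicated by (digest, format); concurrent callers attach to the in-flight task and wait on its watch channel instead of packing the same image twice. A hypothetical call site might look like:

async fn pack_alpine(service: &OciPackerService) -> anyhow::Result<OciPackedImage> {
    let (context, _progress) = OciProgressContext::create();
    let name = ImageName::parse("alpine:latest")?;
    // If another request for the same digest and format is already running,
    // this call subscribes to that task's result rather than starting a new one.
    service
        .request(name, OciPackedFormat::Squashfs, false, context)
        .await
}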


@ -1,14 +1,16 @@
use std::sync::Arc;
use indexmap::IndexMap;
use tokio::sync::{mpsc::Sender, Mutex};
use std::sync::Arc;
use tokio::{
sync::{watch, Mutex},
task::JoinHandle,
};
#[derive(Clone, Debug)]
pub struct OciProgress {
pub phase: OciProgressPhase,
pub digest: Option<String>,
pub layers: IndexMap<String, OciProgressLayer>,
pub value: u64,
pub total: u64,
pub indication: OciProgressIndication,
}
impl Default for OciProgress {
@ -20,72 +22,146 @@ impl Default for OciProgress {
impl OciProgress {
pub fn new() -> Self {
OciProgress {
phase: OciProgressPhase::Resolving,
phase: OciProgressPhase::Started,
digest: None,
layers: IndexMap::new(),
value: 0,
total: 1,
indication: OciProgressIndication::Hidden,
}
}
pub fn add_layer(&mut self, id: &str, size: usize) {
pub fn start_resolving(&mut self) {
self.phase = OciProgressPhase::Resolving;
self.indication = OciProgressIndication::Spinner { message: None };
}
pub fn resolved(&mut self, digest: &str) {
self.digest = Some(digest.to_string());
self.indication = OciProgressIndication::Hidden;
}
pub fn add_layer(&mut self, id: &str) {
self.layers.insert(
id.to_string(),
OciProgressLayer {
id: id.to_string(),
phase: OciProgressLayerPhase::Waiting,
value: 0,
total: size as u64,
indication: OciProgressIndication::Spinner { message: None },
},
);
}
pub fn downloading_layer(&mut self, id: &str, downloaded: usize, total: usize) {
pub fn downloading_layer(&mut self, id: &str, downloaded: u64, total: u64) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Downloading;
entry.value = downloaded as u64;
entry.total = total as u64;
entry.indication = OciProgressIndication::ProgressBar {
message: None,
current: downloaded,
total,
bytes: true,
};
}
}
pub fn downloaded_layer(&mut self, id: &str) {
pub fn downloaded_layer(&mut self, id: &str, total: u64) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Downloaded;
entry.value = entry.total;
entry.indication = OciProgressIndication::Completed {
message: None,
total: Some(total),
bytes: true,
};
}
}
pub fn extracting_layer(&mut self, id: &str, extracted: usize, total: usize) {
pub fn start_assemble(&mut self) {
self.phase = OciProgressPhase::Assemble;
self.indication = OciProgressIndication::Hidden;
}
pub fn start_extracting_layer(&mut self, id: &str) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Extracting;
entry.value = extracted as u64;
entry.total = total as u64;
entry.indication = OciProgressIndication::Spinner { message: None };
}
}
pub fn extracted_layer(&mut self, id: &str) {
pub fn extracting_layer(&mut self, id: &str, file: &str) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Extracting;
entry.indication = OciProgressIndication::Spinner {
message: Some(file.to_string()),
};
}
}
pub fn extracted_layer(&mut self, id: &str, count: u64, total_size: u64) {
if let Some(entry) = self.layers.get_mut(id) {
entry.phase = OciProgressLayerPhase::Extracted;
entry.value = entry.total;
entry.indication = OciProgressIndication::Completed {
message: Some(format!("{} files", count)),
total: Some(total_size),
bytes: true,
};
}
}
pub fn start_packing(&mut self) {
self.phase = OciProgressPhase::Pack;
for layer in self.layers.values_mut() {
layer.indication = OciProgressIndication::Hidden;
}
self.indication = OciProgressIndication::Spinner { message: None };
}
pub fn complete(&mut self, size: u64) {
self.phase = OciProgressPhase::Complete;
self.indication = OciProgressIndication::Completed {
message: None,
total: Some(size),
bytes: true,
}
}
}
#[derive(Clone, Debug)]
pub enum OciProgressPhase {
Started,
Resolving,
Resolved,
ConfigAcquire,
LayerAcquire,
Packing,
ConfigDownload,
LayerDownload,
Assemble,
Pack,
Complete,
}
#[derive(Clone, Debug)]
pub enum OciProgressIndication {
Hidden,
ProgressBar {
message: Option<String>,
current: u64,
total: u64,
bytes: bool,
},
Spinner {
message: Option<String>,
},
Completed {
message: Option<String>,
total: Option<u64>,
bytes: bool,
},
}
#[derive(Clone, Debug)]
pub struct OciProgressLayer {
pub id: String,
pub phase: OciProgressLayerPhase,
pub value: u64,
pub total: u64,
pub indication: OciProgressIndication,
}
#[derive(Clone, Debug)]
@ -99,16 +175,25 @@ pub enum OciProgressLayerPhase {
#[derive(Clone)]
pub struct OciProgressContext {
sender: Sender<OciProgress>,
sender: watch::Sender<OciProgress>,
}
impl OciProgressContext {
pub fn new(sender: Sender<OciProgress>) -> OciProgressContext {
pub fn create() -> (OciProgressContext, watch::Receiver<OciProgress>) {
let (sender, receiver) = watch::channel(OciProgress::new());
(OciProgressContext::new(sender), receiver)
}
pub fn new(sender: watch::Sender<OciProgress>) -> OciProgressContext {
OciProgressContext { sender }
}
pub fn update(&self, progress: &OciProgress) {
let _ = self.sender.try_send(progress.clone());
let _ = self.sender.send(progress.clone());
}
pub fn subscribe(&self) -> watch::Receiver<OciProgress> {
self.sender.subscribe()
}
}
@ -137,4 +222,17 @@ impl OciBoundProgress {
function(&mut progress);
self.context.update(&progress);
}
pub async fn also_update(&self, context: OciProgressContext) -> JoinHandle<()> {
let progress = self.instance.lock().await.clone();
context.update(&progress);
let mut receiver = self.context.subscribe();
tokio::task::spawn(async move {
while (receiver.changed().await).is_ok() {
context
.sender
.send_replace(receiver.borrow_and_update().clone());
}
})
}
}
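
Illustrative note (not part of the diff): progress now flows through a tokio watch channel instead of an mpsc sender, so consumers always see the latest snapshot and a slow reader cannot block the packer. A minimal consumer sketch:

async fn watch_progress(mut receiver: tokio::sync::watch::Receiver<OciProgress>) {
    // `receiver` comes from OciProgressContext::create(); the matching context
    // is handed to the packer, which publishes a snapshot on every update().
    while receiver.changed().await.is_ok() {
        let snapshot = receiver.borrow_and_update().clone();
        println!("phase: {:?}, layers: {}", snapshot.phase, snapshot.layers.len());
    }
}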


@ -7,7 +7,7 @@ use reqwest::{Client, RequestBuilder, Response, StatusCode};
use tokio::{fs::File, io::AsyncWriteExt};
use url::Url;
use crate::progress::OciBoundProgress;
use crate::{name::ImageName, progress::OciBoundProgress, schema::OciSchema};
#[derive(Clone, Debug)]
pub struct OciPlatform {
@ -149,24 +149,20 @@ impl OciRegistryClient {
))?;
let mut response = self.call(self.agent.get(url.as_str())).await?;
let mut size: u64 = 0;
let mut last_progress_size: u64 = 0;
while let Some(chunk) = response.chunk().await? {
dest.write_all(&chunk).await?;
size += chunk.len() as u64;
if (size - last_progress_size) > (5 * 1024 * 1024) {
last_progress_size = size;
if let Some(ref progress) = progress {
progress
.update(|progress| {
progress.downloading_layer(
descriptor.digest(),
size as usize,
descriptor.size() as usize,
);
})
.await;
}
if let Some(ref progress) = progress {
progress
.update(|progress| {
progress.downloading_layer(
descriptor.digest(),
size,
descriptor.size() as u64,
);
})
.await;
}
}
Ok(size)
@ -176,11 +172,11 @@ impl OciRegistryClient {
&mut self,
name: N,
reference: R,
) -> Result<(ImageManifest, String)> {
) -> Result<(OciSchema<ImageManifest>, String)> {
let url = self.url.join(&format!(
"/v2/{}/manifests/{}",
name.as_ref(),
reference.as_ref()
reference.as_ref(),
))?;
let accept = format!(
"{}, {}, {}, {}",
@ -198,20 +194,28 @@ impl OciRegistryClient {
.ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?
.to_str()?
.to_string();
let manifest = serde_json::from_str(&response.text().await?)?;
Ok((manifest, digest))
let bytes = response.bytes().await?;
let manifest = serde_json::from_slice(&bytes)?;
Ok((OciSchema::new(bytes.to_vec(), manifest), digest))
}
pub async fn get_manifest_with_digest<N: AsRef<str>, R: AsRef<str>>(
&mut self,
name: N,
reference: R,
) -> Result<(ImageManifest, String)> {
let url = self.url.join(&format!(
"/v2/{}/manifests/{}",
name.as_ref(),
reference.as_ref()
))?;
reference: Option<R>,
digest: Option<N>,
) -> Result<(OciSchema<ImageManifest>, Option<Descriptor>, String)> {
let what = digest
.as_ref()
.map(|x| x.as_ref().to_string())
.unwrap_or_else(|| {
reference
.map(|x| x.as_ref().to_string())
.unwrap_or_else(|| ImageName::DEFAULT_IMAGE_TAG.to_string())
});
let url = self
.url
.join(&format!("/v2/{}/manifests/{}", name.as_ref(), what,))?;
let accept = format!(
"{}, {}, {}, {}",
MediaType::ImageManifest.to_docker_v2s2()?,
@ -234,18 +238,21 @@ impl OciRegistryClient {
let descriptor = self
.pick_manifest(index)
.ok_or_else(|| anyhow!("unable to pick manifest from index"))?;
return self
let (manifest, digest) = self
.get_raw_manifest_with_digest(name, descriptor.digest())
.await;
.await?;
return Ok((manifest, Some(descriptor), digest));
}
let digest = response
.headers()
.get("Docker-Content-Digest")
.ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?
.to_str()?
.to_string();
let manifest = serde_json::from_str(&response.text().await?)?;
Ok((manifest, digest))
.and_then(|x| x.to_str().ok())
.map(|x| x.to_string())
.or_else(|| digest.map(|x: N| x.as_ref().to_string()))
.ok_or_else(|| anyhow!("fetching manifest did not yield a content digest"))?;
let bytes = response.bytes().await?;
let manifest = serde_json::from_slice(&bytes)?;
Ok((OciSchema::new(bytes.to_vec(), manifest), None, digest))
}
fn pick_manifest(&mut self, index: ImageIndex) -> Option<Descriptor> {

crates/oci/src/schema.rs (new file, 29 lines)

@ -0,0 +1,29 @@
use std::fmt::Debug;
#[derive(Clone, Debug)]
pub struct OciSchema<T: Clone + Debug> {
raw: Vec<u8>,
item: T,
}
impl<T: Clone + Debug> OciSchema<T> {
pub fn new(raw: Vec<u8>, item: T) -> OciSchema<T> {
OciSchema { raw, item }
}
pub fn raw(&self) -> &[u8] {
&self.raw
}
pub fn item(&self) -> &T {
&self.item
}
pub fn into_raw(self) -> Vec<u8> {
self.raw
}
pub fn into_item(self) -> T {
self.item
}
}
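
Illustrative note (not part of the diff): OciSchema pairs the exact bytes fetched from the registry with the deserialized value, so manifests and configs can be written back out byte-for-byte while still being inspected as typed structures. A hypothetical helper sketch:

use oci_spec::image::ImageManifest;

// Hypothetical helper: parse manifest bytes while preserving the raw payload.
fn parse_manifest(bytes: Vec<u8>) -> anyhow::Result<OciSchema<ImageManifest>> {
    let manifest: ImageManifest = serde_json::from_slice(&bytes)?;
    Ok(OciSchema::new(bytes, manifest))
}

// Writing schema.raw() to disk (as the packer cache does) keeps the stored
// JSON identical to what the registry served; re-serializing schema.item()
// could reorder fields or change whitespace and break digest verification.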


@ -194,7 +194,7 @@ impl VfsTree {
}
}
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<()> {
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<&VfsNode> {
let mut meta = VfsNode::from(entry)?;
let path = entry.path()?.to_path_buf();
let parent = if let Some(parent) = path.parent() {
@ -218,8 +218,11 @@ impl VfsTree {
meta.children = old.children;
}
}
parent.children.push(meta);
Ok(())
parent.children.push(meta.clone());
let Some(reference) = parent.children.iter().find(|child| child.name == meta.name) else {
return Err(anyhow!("unable to find inserted child in vfs"));
};
Ok(reference)
}
pub fn set_disk_path(&mut self, path: &Path, disk_path: &Path) -> Result<()> {


@ -1,6 +1,6 @@
[package]
name = "krata-runtime"
description = "Runtime for running guests on the krata hypervisor."
description = "Runtime for running guests on the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
@ -12,18 +12,20 @@ resolver = "2"
anyhow = { workspace = true }
backhand = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
krata = { path = "../krata", version = "^0.0.11" }
krata-advmac = { workspace = true }
krata-oci = { path = "../oci", version = "^0.0.9" }
krata-oci = { path = "../oci", version = "^0.0.11" }
log = { workspace = true }
loopdev-3 = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.9" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.9" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.9" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.11" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.11" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.11" }
krata-xenplatform = { path = "../xen/xenplatform", version = "^0.0.11" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.11" }
walkdir = { workspace = true }
[lib]
name = "kratart"


@ -1,21 +1,22 @@
use anyhow::Result;
use backhand::{FilesystemWriter, NodeHeader};
use backhand::compression::Compressor;
use backhand::{FilesystemCompressor, FilesystemWriter, NodeHeader};
use krata::launchcfg::LaunchInfo;
use krataoci::packer::OciImagePacked;
use krataoci::packer::OciPackedImage;
use log::trace;
use std::fs;
use std::fs::File;
use std::path::PathBuf;
use uuid::Uuid;
pub struct ConfigBlock<'a> {
pub image: &'a OciImagePacked,
pub struct ConfigBlock {
pub image: OciPackedImage,
pub file: PathBuf,
pub dir: PathBuf,
}
impl ConfigBlock<'_> {
pub fn new<'a>(uuid: &Uuid, image: &'a OciImagePacked) -> Result<ConfigBlock<'a>> {
impl ConfigBlock {
pub fn new(uuid: &Uuid, image: OciPackedImage) -> Result<ConfigBlock> {
let mut dir = std::env::temp_dir().clone();
dir.push(format!("krata-cfg-{}", uuid));
fs::create_dir_all(&dir)?;
@ -26,9 +27,10 @@ impl ConfigBlock<'_> {
pub fn build(&self, launch_config: &LaunchInfo) -> Result<()> {
trace!("build launch_config={:?}", launch_config);
let manifest = self.image.config.to_string()?;
let config = self.image.config.raw();
let launch = serde_json::to_string(launch_config)?;
let mut writer = FilesystemWriter::default();
writer.set_compressor(FilesystemCompressor::new(Compressor::Gzip, None)?);
writer.push_dir(
"/image",
NodeHeader {
@ -39,7 +41,7 @@ impl ConfigBlock<'_> {
},
)?;
writer.push_file(
manifest.as_bytes(),
config,
"/image/config.json",
NodeHeader {
permissions: 384,


@ -375,7 +375,10 @@ impl KrataChannelBackendProcessor {
};
ring_ref = self.use_reserved_ref.unwrap_or(ring_ref);
debug!(
"channel backend for domain {} channel {}: ring-ref={} port={}",
self.domid, self.id, ring_ref, port,
);
break (ring_ref, port);
}
}
@ -389,14 +392,24 @@ impl KrataChannelBackendProcessor {
self.store
.write_string(format!("{}/state", self.backend), "4")
.await?;
let memory = self.gnttab.map_grant_refs(
vec![GrantRef {
domid: self.domid,
reference: ring_ref as u32,
}],
true,
true,
)?;
let memory = self
.gnttab
.map_grant_refs(
vec![GrantRef {
domid: self.domid,
reference: ring_ref as u32,
}],
true,
true,
)
.map_err(|e| {
anyhow!(
"failed to map grant ref {} for domid {}: {}",
ring_ref,
self.domid,
e
)
})?;
let mut channel = self.evtchn.bind(self.domid, port).await?;
unsafe {
let buffer = self.read_output_buffer(channel.local_port, &memory).await?;

crates/runtime/src/ip.rs (new file, 331 lines)

@ -0,0 +1,331 @@
use std::{
collections::HashMap,
net::{Ipv4Addr, Ipv6Addr},
str::FromStr,
sync::Arc,
};
use anyhow::{anyhow, Result};
use ipnetwork::{Ipv4Network, Ipv6Network};
use log::error;
use tokio::sync::RwLock;
use uuid::Uuid;
use xenstore::{XsdClient, XsdInterface};
#[derive(Default, Clone)]
pub struct IpVendorState {
pub ipv4: HashMap<Ipv4Addr, Uuid>,
pub ipv6: HashMap<Ipv6Addr, Uuid>,
pub pending_ipv4: HashMap<Ipv4Addr, Uuid>,
pub pending_ipv6: HashMap<Ipv6Addr, Uuid>,
}
#[derive(Clone)]
pub struct IpVendor {
store: XsdClient,
host_uuid: Uuid,
ipv4_network: Ipv4Network,
ipv6_network: Ipv6Network,
gateway_ipv4: Ipv4Addr,
gateway_ipv6: Ipv6Addr,
state: Arc<RwLock<IpVendorState>>,
}
pub struct IpAssignment {
vendor: IpVendor,
pub uuid: Uuid,
pub ipv4: Ipv4Addr,
pub ipv6: Ipv6Addr,
pub ipv4_prefix: u8,
pub ipv6_prefix: u8,
pub gateway_ipv4: Ipv4Addr,
pub gateway_ipv6: Ipv6Addr,
pub committed: bool,
}
impl IpAssignment {
pub async fn commit(&mut self) -> Result<()> {
self.vendor.commit(self).await?;
self.committed = true;
Ok(())
}
}
impl Drop for IpAssignment {
fn drop(&mut self) {
if !self.committed {
let ipv4 = self.ipv4;
let ipv6 = self.ipv6;
let uuid = self.uuid;
let vendor = self.vendor.clone();
tokio::task::spawn(async move {
let _ = vendor.recall_raw(ipv4, ipv6, uuid, true).await;
});
}
}
}
impl IpVendor {
pub async fn new(
store: XsdClient,
host_uuid: Uuid,
ipv4_network: Ipv4Network,
ipv6_network: Ipv6Network,
) -> Result<Self> {
let mut state = IpVendor::fetch_stored_state(&store).await?;
let (gateway_ipv4, gateway_ipv6) =
IpVendor::allocate_ipset(&mut state, host_uuid, ipv4_network, ipv6_network)?;
let vend = IpVendor {
store,
host_uuid,
ipv4_network,
ipv6_network,
gateway_ipv4,
gateway_ipv6,
state: Arc::new(RwLock::new(state)),
};
Ok(vend)
}
async fn fetch_stored_state(store: &XsdClient) -> Result<IpVendorState> {
let mut state = IpVendorState::default();
for domid_candidate in store.list("/local/domain").await? {
let dom_path = format!("/local/domain/{}", domid_candidate);
let Some(uuid) = store
.read_string(format!("{}/krata/uuid", dom_path))
.await?
.and_then(|x| Uuid::from_str(&x).ok())
else {
continue;
};
let assigned_ipv4 = store
.read_string(format!("{}/krata/network/guest/ipv4", dom_path))
.await?
.and_then(|x| Ipv4Network::from_str(&x).ok());
let assigned_ipv6 = store
.read_string(format!("{}/krata/network/guest/ipv6", dom_path))
.await?
.and_then(|x| Ipv6Network::from_str(&x).ok());
if let Some(existing_ipv4) = assigned_ipv4 {
if let Some(previous) = state.ipv4.insert(existing_ipv4.ip(), uuid) {
error!("ipv4 conflict detected: guest {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv4.ip(), uuid, uuid);
}
}
if let Some(existing_ipv6) = assigned_ipv6 {
if let Some(previous) = state.ipv6.insert(existing_ipv6.ip(), uuid) {
error!("ipv6 conflict detected: guest {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv6.ip(), uuid, uuid);
}
}
}
Ok(state)
}
fn allocate_ipset(
state: &mut IpVendorState,
uuid: Uuid,
ipv4_network: Ipv4Network,
ipv6_network: Ipv6Network,
) -> Result<(Ipv4Addr, Ipv6Addr)> {
let mut found_ipv4: Option<Ipv4Addr> = None;
for ip in ipv4_network.iter() {
if ip.is_loopback() || ip.is_multicast() || ip.is_broadcast() {
continue;
}
if !ip.is_private() {
continue;
}
let last = ip.octets()[3];
if last == 0 || last > 250 {
continue;
}
if state.ipv4.contains_key(&ip) {
continue;
}
found_ipv4 = Some(ip);
break;
}
let mut found_ipv6: Option<Ipv6Addr> = None;
for ip in ipv6_network.iter() {
if ip.is_loopback() || ip.is_multicast() {
continue;
}
if state.ipv6.contains_key(&ip) {
continue;
}
found_ipv6 = Some(ip);
break;
}
let Some(ipv4) = found_ipv4 else {
return Err(anyhow!(
"unable to allocate ipv4 address, assigned network is exhausted"
));
};
let Some(ipv6) = found_ipv6 else {
return Err(anyhow!(
"unable to allocate ipv6 address, assigned network is exhausted"
));
};
state.ipv4.insert(ipv4, uuid);
state.ipv6.insert(ipv6, uuid);
Ok((ipv4, ipv6))
}
pub async fn assign(&self, uuid: Uuid) -> Result<IpAssignment> {
let mut state = self.state.write().await;
let (ipv4, ipv6) =
IpVendor::allocate_ipset(&mut state, uuid, self.ipv4_network, self.ipv6_network)?;
state.pending_ipv4.insert(ipv4, uuid);
state.pending_ipv6.insert(ipv6, uuid);
Ok(IpAssignment {
vendor: self.clone(),
uuid,
ipv4,
ipv6,
ipv4_prefix: self.ipv4_network.prefix(),
ipv6_prefix: self.ipv6_network.prefix(),
gateway_ipv4: self.gateway_ipv4,
gateway_ipv6: self.gateway_ipv6,
committed: false,
})
}
pub async fn commit(&self, assignment: &IpAssignment) -> Result<()> {
let mut state = self.state.write().await;
if state.pending_ipv4.remove(&assignment.ipv4) != Some(assignment.uuid) {
return Err(anyhow!("matching pending ipv4 assignment was not found"));
}
if state.pending_ipv6.remove(&assignment.ipv6) != Some(assignment.uuid) {
return Err(anyhow!("matching pending ipv6 assignment was not found"));
}
Ok(())
}
async fn recall_raw(
&self,
ipv4: Ipv4Addr,
ipv6: Ipv6Addr,
uuid: Uuid,
pending: bool,
) -> Result<()> {
let mut state = self.state.write().await;
if pending {
if state.pending_ipv4.remove(&ipv4) != Some(uuid) {
return Err(anyhow!("matching pending ipv4 assignment was not found"));
}
if state.pending_ipv6.remove(&ipv6) != Some(uuid) {
return Err(anyhow!("matching pending ipv6 assignment was not found"));
}
}
if state.ipv4.remove(&ipv4) != Some(uuid) {
return Err(anyhow!("matching allocated ipv4 assignment was not found"));
}
if state.ipv6.remove(&ipv6) != Some(uuid) {
return Err(anyhow!("matching allocated ipv6 assignment was not found"));
}
Ok(())
}
pub async fn recall(&self, assignment: &IpAssignment) -> Result<()> {
self.recall_raw(assignment.ipv4, assignment.ipv6, assignment.uuid, false)
.await?;
Ok(())
}
pub async fn reload(&self) -> Result<()> {
let mut state = self.state.write().await;
let mut intermediate = IpVendor::fetch_stored_state(&self.store).await?;
intermediate.ipv4.insert(self.gateway_ipv4, self.host_uuid);
intermediate.ipv6.insert(self.gateway_ipv6, self.host_uuid);
for (ipv4, uuid) in &state.pending_ipv4 {
if let Some(previous) = intermediate.ipv4.insert(*ipv4, *uuid) {
error!("ipv4 conflict detected: guest {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv4, uuid, uuid);
}
intermediate.pending_ipv4.insert(*ipv4, *uuid);
}
for (ipv6, uuid) in &state.pending_ipv6 {
if let Some(previous) = intermediate.ipv6.insert(*ipv6, *uuid) {
error!("ipv6 conflict detected: guest {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv6, uuid, uuid);
}
intermediate.pending_ipv6.insert(*ipv6, *uuid);
}
*state = intermediate;
Ok(())
}
pub async fn read_domain_assignment(
&self,
uuid: Uuid,
domid: u32,
) -> Result<Option<IpAssignment>> {
let dom_path = format!("/local/domain/{}", domid);
let Some(guest_ipv4) = self
.store
.read_string(format!("{}/krata/network/guest/ipv4", dom_path))
.await?
else {
return Ok(None);
};
let Some(guest_ipv6) = self
.store
.read_string(format!("{}/krata/network/guest/ipv6", dom_path))
.await?
else {
return Ok(None);
};
let Some(gateway_ipv4) = self
.store
.read_string(format!("{}/krata/network/gateway/ipv4", dom_path))
.await?
else {
return Ok(None);
};
let Some(gateway_ipv6) = self
.store
.read_string(format!("{}/krata/network/gateway/ipv6", dom_path))
.await?
else {
return Ok(None);
};
let Some(guest_ipv4) = Ipv4Network::from_str(&guest_ipv4).ok() else {
return Ok(None);
};
let Some(guest_ipv6) = Ipv6Network::from_str(&guest_ipv6).ok() else {
return Ok(None);
};
let Some(gateway_ipv4) = Ipv4Network::from_str(&gateway_ipv4).ok() else {
return Ok(None);
};
let Some(gateway_ipv6) = Ipv6Network::from_str(&gateway_ipv6).ok() else {
return Ok(None);
};
Ok(Some(IpAssignment {
vendor: self.clone(),
uuid,
ipv4: guest_ipv4.ip(),
ipv4_prefix: guest_ipv4.prefix(),
ipv6: guest_ipv6.ip(),
ipv6_prefix: guest_ipv6.prefix(),
gateway_ipv4: gateway_ipv4.ip(),
gateway_ipv6: gateway_ipv6.ip(),
committed: true,
}))
}
pub async fn read(&self) -> Result<IpVendorState> {
Ok(self.state.read().await.clone())
}
}
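
Illustrative note (not part of the diff): IP allocation is now two-phase. assign() reserves addresses in the pending maps and commit() finalizes them once the domain is actually created; an assignment dropped uncommitted recalls its pending addresses. A hypothetical launch-path sketch:

async fn allocate_for_guest(vendor: &IpVendor) -> anyhow::Result<()> {
    let uuid = Uuid::new_v4();
    let mut ip = vendor.assign(uuid).await?;
    // ... write ip.ipv4 / ip.ipv6 into the domain's xenstore keys and create it ...
    ip.commit().await?; // moves the reservation out of the pending maps
    Ok(())
    // If domain creation fails and commit() is never reached, dropping `ip`
    // spawns a task that recalls the pending addresses.
}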


@ -1,36 +1,45 @@
use std::collections::HashMap;
use std::net::{IpAddr, Ipv6Addr};
use std::fs;
use std::net::IpAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::{fs, net::Ipv4Addr, str::FromStr};
use advmac::MacAddr6;
use anyhow::{anyhow, Result};
use ipnetwork::{IpNetwork, Ipv4Network};
use ipnetwork::IpNetwork;
use krata::launchcfg::{
LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
LaunchPackedFormat, LaunchRoot,
};
use krataoci::packer::OciImagePacked;
use krataoci::packer::OciPackedImage;
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
use xenstore::XsdInterface;
use xenplatform::domain::BaseDomainConfig;
use crate::cfgblk::ConfigBlock;
use crate::RuntimeContext;
use super::{GuestInfo, GuestState};
pub use xenclient::{
pci::PciBdf, DomainPciDevice as PciDevice, DomainPciRdmReservePolicy as PciRdmReservePolicy,
};
pub struct GuestLaunchRequest {
pub format: LaunchPackedFormat,
pub kernel: Vec<u8>,
pub initrd: Vec<u8>,
pub uuid: Option<Uuid>,
pub name: Option<String>,
pub vcpus: u32,
pub mem: u64,
pub env: HashMap<String, String>,
pub run: Option<Vec<String>>,
pub pcis: Vec<PciDevice>,
pub debug: bool,
pub image: OciImagePacked,
pub image: OciPackedImage,
pub addons_image: Option<PathBuf>,
}
pub struct GuestLauncher {
@ -57,13 +66,7 @@ impl GuestLauncher {
container_mac.set_multicast(false);
let _launch_permit = self.launch_semaphore.acquire().await?;
let guest_ipv4 = self.allocate_ipv4(context).await?;
let guest_ipv6 = container_mac.to_link_local_ipv6();
let gateway_ipv4 = "10.75.70.1";
let gateway_ipv6 = "fe80::1";
let ipv4_network_mask: u32 = 16;
let ipv6_network_mask: u32 = 10;
let mut ip = context.ipvendor.assign(uuid).await?;
let launch_config = LaunchInfo {
root: LaunchRoot {
format: request.format.clone(),
@ -78,12 +81,12 @@ impl GuestLauncher {
network: Some(LaunchNetwork {
link: "eth0".to_string(),
ipv4: LaunchNetworkIpv4 {
address: format!("{}/{}", guest_ipv4, ipv4_network_mask),
gateway: gateway_ipv4.to_string(),
address: format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
gateway: ip.gateway_ipv4.to_string(),
},
ipv6: LaunchNetworkIpv6 {
address: format!("{}/{}", guest_ipv6, ipv6_network_mask),
gateway: gateway_ipv6.to_string(),
address: format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
gateway: ip.gateway_ipv6.to_string(),
},
resolver: LaunchNetworkResolver {
nameservers: vec![
@ -98,8 +101,10 @@ impl GuestLauncher {
run: request.run,
};
let cfgblk = ConfigBlock::new(&uuid, &request.image)?;
cfgblk.build(&launch_config)?;
let cfgblk = ConfigBlock::new(&uuid, request.image.clone())?;
let cfgblk_file = cfgblk.file.clone();
let cfgblk_dir = cfgblk.dir.clone();
tokio::task::spawn_blocking(move || cfgblk.build(&launch_config)).await??;
let image_squashfs_path = request
.image
@ -107,47 +112,91 @@ impl GuestLauncher {
.to_str()
.ok_or_else(|| anyhow!("failed to convert image path to string"))?;
let cfgblk_dir_path = cfgblk
.dir
let cfgblk_dir_path = cfgblk_dir
.to_str()
.ok_or_else(|| anyhow!("failed to convert cfgblk directory path to string"))?;
let cfgblk_squashfs_path = cfgblk
.file
let cfgblk_squashfs_path = cfgblk_file
.to_str()
.ok_or_else(|| anyhow!("failed to convert cfgblk squashfs path to string"))?;
let addons_squashfs_path = request
.addons_image
.map(|x| x.to_str().map(|x| x.to_string()))
.map(|x| {
Some(x.ok_or_else(|| anyhow!("failed to convert addons squashfs path to string")))
})
.unwrap_or(None);
let addons_squashfs_path = if let Some(path) = addons_squashfs_path {
Some(path?)
} else {
None
};
let image_squashfs_loop = context.autoloop.loopify(image_squashfs_path)?;
let cfgblk_squashfs_loop = context.autoloop.loopify(cfgblk_squashfs_path)?;
let cmdline_options = [
if request.debug { "debug" } else { "quiet" },
"elevator=noop",
];
let addons_squashfs_loop = if let Some(ref addons_squashfs_path) = addons_squashfs_path {
Some(context.autoloop.loopify(addons_squashfs_path)?)
} else {
None
};
let mut cmdline_options = ["console=hvc0"].to_vec();
if !request.debug {
cmdline_options.push("quiet");
}
let cmdline = cmdline_options.join(" ");
let guest_mac_string = container_mac.to_string().replace('-', ":");
let gateway_mac_string = gateway_mac.to_string().replace('-', ":");
let mut disks = vec![
DomainDisk {
vdev: "xvda".to_string(),
block: image_squashfs_loop.clone(),
writable: false,
},
DomainDisk {
vdev: "xvdb".to_string(),
block: cfgblk_squashfs_loop.clone(),
writable: false,
},
];
if let Some(ref addons) = addons_squashfs_loop {
disks.push(DomainDisk {
vdev: "xvdc".to_string(),
block: addons.clone(),
writable: false,
});
}
let mut loops = vec![
format!("{}:{}:none", image_squashfs_loop.path, image_squashfs_path),
format!(
"{}:{}:{}",
cfgblk_squashfs_loop.path, cfgblk_squashfs_path, cfgblk_dir_path
),
];
if let Some(ref addons) = addons_squashfs_loop {
loops.push(format!(
"{}:{}:none",
addons.path,
addons_squashfs_path
.clone()
.ok_or_else(|| anyhow!("addons squashfs path missing"))?
));
}
let mut extra_keys = vec![
("krata/uuid".to_string(), uuid.to_string()),
(
"krata/loops".to_string(),
format!(
"{}:{}:none,{}:{}:{}",
&image_squashfs_loop.path,
image_squashfs_path,
&cfgblk_squashfs_loop.path,
cfgblk_squashfs_path,
cfgblk_dir_path,
),
),
("krata/loops".to_string(), loops.join(",")),
(
"krata/network/guest/ipv4".to_string(),
format!("{}/{}", guest_ipv4, ipv4_network_mask),
format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
),
(
"krata/network/guest/ipv6".to_string(),
format!("{}/{}", guest_ipv6, ipv6_network_mask),
format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
),
(
"krata/network/guest/mac".to_string(),
@ -155,11 +204,11 @@ impl GuestLauncher {
),
(
"krata/network/gateway/ipv4".to_string(),
format!("{}/{}", gateway_ipv4, ipv4_network_mask),
format!("{}/{}", ip.gateway_ipv4, ip.ipv4_prefix),
),
(
"krata/network/gateway/ipv6".to_string(),
format!("{}/{}", gateway_ipv6, ipv6_network_mask),
format!("{}/{}", ip.gateway_ipv6, ip.ipv6_prefix),
),
(
"krata/network/gateway/mac".to_string(),
@ -172,108 +221,65 @@ impl GuestLauncher {
}
let config = DomainConfig {
base: BaseDomainConfig {
max_vcpus: request.vcpus,
mem_mb: request.mem,
kernel: request.kernel,
initrd: request.initrd,
cmdline,
uuid,
owner_domid: 0,
enable_iommu: true,
},
backend_domid: 0,
name: &xen_name,
max_vcpus: request.vcpus,
mem_mb: request.mem,
kernel_path: &context.kernel,
initrd_path: &context.initrd,
cmdline: &cmdline,
use_console_backend: Some("krata-console"),
disks: vec![
DomainDisk {
vdev: "xvda",
block: &image_squashfs_loop,
writable: false,
},
DomainDisk {
vdev: "xvdb",
block: &cfgblk_squashfs_loop,
writable: false,
},
],
name: xen_name,
swap_console_backend: Some("krata-console".to_string()),
disks,
channels: vec![DomainChannel {
typ: "krata-channel".to_string(),
initialized: false,
}],
vifs: vec![DomainNetworkInterface {
mac: &guest_mac_string,
mac: guest_mac_string.clone(),
mtu: 1500,
bridge: None,
script: None,
}],
pcis: request.pcis.clone(),
filesystems: vec![],
event_channels: vec![],
extra_keys,
extra_rw_paths: vec!["krata/guest".to_string()],
};
match context.xen.create(&config).await {
Ok(created) => Ok(GuestInfo {
name: request.name.as_ref().map(|x| x.to_string()),
uuid,
domid: created.domid,
image: request.image.digest,
loops: vec![],
guest_ipv4: Some(IpNetwork::new(
IpAddr::V4(guest_ipv4),
ipv4_network_mask as u8,
)?),
guest_ipv6: Some(IpNetwork::new(
IpAddr::V6(guest_ipv6),
ipv6_network_mask as u8,
)?),
guest_mac: Some(guest_mac_string.clone()),
gateway_ipv4: Some(IpNetwork::new(
IpAddr::V4(Ipv4Addr::from_str(gateway_ipv4)?),
ipv4_network_mask as u8,
)?),
gateway_ipv6: Some(IpNetwork::new(
IpAddr::V6(Ipv6Addr::from_str(gateway_ipv6)?),
ipv6_network_mask as u8,
)?),
gateway_mac: Some(gateway_mac_string.clone()),
state: GuestState { exit_code: None },
}),
Ok(created) => {
ip.commit().await?;
Ok(GuestInfo {
name: request.name.as_ref().map(|x| x.to_string()),
uuid,
domid: created.domid,
image: request.image.digest,
loops: vec![],
guest_ipv4: Some(IpNetwork::new(IpAddr::V4(ip.ipv4), ip.ipv4_prefix)?),
guest_ipv6: Some(IpNetwork::new(IpAddr::V6(ip.ipv6), ip.ipv6_prefix)?),
guest_mac: Some(guest_mac_string.clone()),
gateway_ipv4: Some(IpNetwork::new(
IpAddr::V4(ip.gateway_ipv4),
ip.ipv4_prefix,
)?),
gateway_ipv6: Some(IpNetwork::new(
IpAddr::V6(ip.gateway_ipv6),
ip.ipv6_prefix,
)?),
gateway_mac: Some(gateway_mac_string.clone()),
state: GuestState { exit_code: None },
})
}
Err(error) => {
let _ = context.autoloop.unloop(&image_squashfs_loop.path).await;
let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path).await;
let _ = fs::remove_dir(&cfgblk.dir);
let _ = fs::remove_dir(&cfgblk_dir);
Err(error.into())
}
}
}
async fn allocate_ipv4(&self, context: &RuntimeContext) -> Result<Ipv4Addr> {
let network = Ipv4Network::new(Ipv4Addr::new(10, 75, 80, 0), 24)?;
let mut used: Vec<Ipv4Addr> = vec![];
for domid_candidate in context.xen.store.list("/local/domain").await? {
let dom_path = format!("/local/domain/{}", domid_candidate);
let ip_path = format!("{}/krata/network/guest/ipv4", dom_path);
let existing_ip = context.xen.store.read_string(&ip_path).await?;
if let Some(existing_ip) = existing_ip {
let ipv4_network = Ipv4Network::from_str(&existing_ip)?;
used.push(ipv4_network.ip());
}
}
let mut found: Option<Ipv4Addr> = None;
for ip in network.iter() {
let last = ip.octets()[3];
if last == 0 || last == 255 {
continue;
}
if !used.contains(&ip) {
found = Some(ip);
break;
}
}
if found.is_none() {
return Err(anyhow!(
"unable to find ipv4 to allocate to guest, ipv4 addresses are exhausted"
));
}
Ok(found.unwrap())
}
}


@ -1,12 +1,9 @@
use std::{
fs,
path::{Path, PathBuf},
str::FromStr,
sync::Arc,
};
use std::{fs, net::Ipv4Addr, path::PathBuf, str::FromStr, sync::Arc};
use anyhow::{anyhow, Result};
use ipnetwork::IpNetwork;
use ip::IpVendor;
use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network};
use log::error;
use loopdev::LoopControl;
use tokio::sync::Semaphore;
use uuid::Uuid;
@ -21,8 +18,15 @@ use self::{
pub mod autoloop;
pub mod cfgblk;
pub mod channel;
pub mod ip;
pub mod launch;
#[cfg(target_arch = "x86_64")]
type RuntimePlatform = xenplatform::x86pv::X86PvPlatform;
#[cfg(not(target_arch = "x86_64"))]
type RuntimePlatform = xenplatform::unsupported::UnsupportedPlatform;
pub struct GuestLoopInfo {
pub device: String,
pub file: String,
@ -51,44 +55,24 @@ pub struct GuestInfo {
#[derive(Clone)]
pub struct RuntimeContext {
pub autoloop: AutoLoop,
pub xen: XenClient,
pub kernel: String,
pub initrd: String,
pub xen: XenClient<RuntimePlatform>,
pub ipvendor: IpVendor,
}
impl RuntimeContext {
pub async fn new(store: String) -> Result<Self> {
let mut image_cache_path = PathBuf::from(&store);
image_cache_path.push("cache");
fs::create_dir_all(&image_cache_path)?;
let xen = XenClient::open(0).await?;
image_cache_path.push("image");
fs::create_dir_all(&image_cache_path)?;
let kernel = RuntimeContext::detect_guest_file(&store, "kernel")?;
let initrd = RuntimeContext::detect_guest_file(&store, "initrd")?;
pub async fn new(host_uuid: Uuid) -> Result<Self> {
let xen = XenClient::new(0, RuntimePlatform::new()).await?;
let ipv4_network = Ipv4Network::new(Ipv4Addr::new(10, 75, 80, 0), 24)?;
let ipv6_network = Ipv6Network::from_str("fdd4:1476:6c7e::/48")?;
let ipvend =
IpVendor::new(xen.store.clone(), host_uuid, ipv4_network, ipv6_network).await?;
Ok(RuntimeContext {
autoloop: AutoLoop::new(LoopControl::open()?),
xen,
kernel,
initrd,
ipvendor: ipvend,
})
}
fn detect_guest_file(store: &str, name: &str) -> Result<String> {
let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
if path.is_file() {
return path_as_string(&path);
}
path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
if path.is_file() {
return path_as_string(&path);
}
Err(anyhow!("unable to find required guest file: {}", name))
}
pub async fn list(&self) -> Result<Vec<GuestInfo>> {
let mut guests: Vec<GuestInfo> = Vec::new();
for domid_candidate in self.xen.store.list("/local/domain").await? {
@ -248,18 +232,18 @@ impl RuntimeContext {
#[derive(Clone)]
pub struct Runtime {
store: Arc<String>,
host_uuid: Uuid,
context: RuntimeContext,
launch_semaphore: Arc<Semaphore>,
}
impl Runtime {
pub async fn new(store: String) -> Result<Self> {
let context = RuntimeContext::new(store.clone()).await?;
pub async fn new(host_uuid: Uuid) -> Result<Self> {
let context = RuntimeContext::new(host_uuid).await?;
Ok(Self {
store: Arc::new(store),
host_uuid,
context,
launch_semaphore: Arc::new(Semaphore::new(1)),
launch_semaphore: Arc::new(Semaphore::new(10)),
})
}
@ -293,6 +277,11 @@ impl Runtime {
return Err(anyhow!("unable to find krata uuid based on the domain",));
}
let uuid = Uuid::parse_str(&uuid)?;
let ip = self
.context
.ipvendor
.read_domain_assignment(uuid, domid)
.await?;
let loops = store
.read_string(format!("{}/krata/loops", dom_path).as_str())
.await?;
@ -312,6 +301,16 @@ impl Runtime {
}
}
}
if let Some(ip) = ip {
if let Err(error) = self.context.ipvendor.recall(&ip).await {
error!(
"failed to recall ip assignment for guest {}: {}",
uuid, error
);
}
}
Ok(uuid)
}
@ -320,12 +319,6 @@ impl Runtime {
}
pub async fn dupe(&self) -> Result<Runtime> {
Runtime::new((*self.store).clone()).await
Runtime::new(self.host_uuid).await
}
}
fn path_as_string(path: &Path) -> Result<String> {
path.to_str()
.ok_or_else(|| anyhow!("unable to convert path to string"))
.map(|x| x.to_string())
}

View File

@ -1,6 +1,6 @@
[package]
name = "krata-xencall"
description = "An implementation of direct interfacing to xen privcmd for krata."
description = "An implementation of direct interfacing to Xen privcmd for krata"
license.workspace = true
version.workspace = true
homepage.workspace = true
@ -33,7 +33,3 @@ path = "examples/domain_create.rs"
[[example]]
name = "xencall-version-capabilities"
path = "examples/version_capabilities.rs"
[[example]]
name = "xencall-vcpu-context"
path = "examples/vcpu_context.rs"

View File

@ -1,12 +0,0 @@
use xencall::error::Result;
use xencall::XenCall;
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();
let call = XenCall::open(0)?;
let context = call.get_vcpu_context(224, 0).await?;
println!("{:?}", context);
Ok(())
}

View File

@ -3,28 +3,38 @@ pub mod sys;
use crate::error::{Error, Result};
use crate::sys::{
AddressSize, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext, EvtChnAllocUnbound,
GetDomainInfo, GetPageFrameInfo3, Hypercall, HypercallInit, MaxMem, MaxVcpus, MemoryMap,
MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, VcpuGuestContext,
VcpuGuestContextAny, XenCapabilitiesInfo, HYPERVISOR_DOMCTL, HYPERVISOR_EVENT_CHANNEL_OP,
HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP, HYPERVISOR_MULTICALL, HYPERVISOR_XEN_VERSION,
XENVER_CAPABILITIES, XEN_DOMCTL_CREATEDOMAIN, XEN_DOMCTL_DESTROYDOMAIN,
XEN_DOMCTL_GETDOMAININFO, XEN_DOMCTL_GETPAGEFRAMEINFO3, XEN_DOMCTL_GETVCPUCONTEXT,
XEN_DOMCTL_HYPERCALL_INIT, XEN_DOMCTL_MAX_MEM, XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN,
XEN_DOMCTL_SETVCPUCONTEXT, XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_UNPAUSEDOMAIN,
XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP, XEN_MEM_POPULATE_PHYSMAP,
AddToPhysmap, AddressSize, AssignDevice, CreateDomain, DomCtl, DomCtlValue, DomCtlVcpuContext,
EvtChnAllocUnbound, GetDomainInfo, GetPageFrameInfo3, HvmContext, HvmParam, Hypercall,
HypercallInit, IoMemPermission, IoPortPermission, IrqPermission, MaxMem, MaxVcpus, MemoryMap,
MemoryReservation, MmapBatch, MmapResource, MmuExtOp, MultiCallEntry, PagingMempool,
PciAssignDevice, XenCapabilitiesInfo, DOMCTL_DEV_PCI, HYPERVISOR_DOMCTL,
HYPERVISOR_EVENT_CHANNEL_OP, HYPERVISOR_HVM_OP, HYPERVISOR_MEMORY_OP, HYPERVISOR_MMUEXT_OP,
HYPERVISOR_MULTICALL, HYPERVISOR_XEN_VERSION, XENVER_CAPABILITIES, XEN_DOMCTL_ASSIGN_DEVICE,
XEN_DOMCTL_CREATEDOMAIN, XEN_DOMCTL_DESTROYDOMAIN, XEN_DOMCTL_GETDOMAININFO,
XEN_DOMCTL_GETHVMCONTEXT, XEN_DOMCTL_GETPAGEFRAMEINFO3, XEN_DOMCTL_HYPERCALL_INIT,
XEN_DOMCTL_IOMEM_PERMISSION, XEN_DOMCTL_IOPORT_PERMISSION, XEN_DOMCTL_IRQ_PERMISSION,
XEN_DOMCTL_MAX_MEM, XEN_DOMCTL_MAX_VCPUS, XEN_DOMCTL_PAUSEDOMAIN, XEN_DOMCTL_SETHVMCONTEXT,
XEN_DOMCTL_SETVCPUCONTEXT, XEN_DOMCTL_SET_ADDRESS_SIZE, XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
XEN_DOMCTL_UNPAUSEDOMAIN, XEN_MEM_ADD_TO_PHYSMAP, XEN_MEM_CLAIM_PAGES, XEN_MEM_MEMORY_MAP,
XEN_MEM_POPULATE_PHYSMAP,
};
use libc::{c_int, mmap, usleep, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use libc::{c_int, mmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use log::trace;
use nix::errno::Errno;
use std::ffi::{c_long, c_uint, c_ulong, c_void};
use std::sync::Arc;
use sys::{XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION};
use std::time::Duration;
use sys::{
E820Entry, ForeignMemoryMap, PhysdevMapPirq, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP,
PHYSDEVOP_MAP_PIRQ, XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION,
XEN_MEM_SET_MEMORY_MAP,
};
use tokio::sync::Semaphore;
use tokio::time::sleep;
use std::fs::{File, OpenOptions};
use std::os::fd::AsRawFd;
use std::ptr::addr_of_mut;
use std::ptr::{addr_of_mut, null_mut};
use std::slice;
#[derive(Clone)]
@ -227,8 +237,8 @@ impl XenCall {
num: num as u32,
domid: domid as u16,
addr,
mfns: mfns.as_mut_ptr(),
errors: errors.as_mut_ptr(),
mfns: mfns.as_mut_ptr() as u64,
errors: errors.as_mut_ptr() as u64,
};
let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
@ -237,7 +247,7 @@ impl XenCall {
return Err(Error::MmapBatchFailed(errno))?;
}
usleep(100);
sleep(Duration::from_micros(100)).await;
let mut i: usize = 0;
let mut paged: usize = 0;
@ -252,8 +262,8 @@ impl XenCall {
num: 1,
domid: domid as u16,
addr: addr + ((i as u64) << 12),
mfns: mfns.as_mut_ptr().add(i),
errors: errors.as_mut_ptr().add(i),
mfns: mfns.as_mut_ptr().add(i) as u64,
errors: errors.as_mut_ptr().add(i) as u64,
};
loop {
@ -453,45 +463,19 @@ impl XenCall {
Ok(())
}
pub async fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
trace!(
"domctl fd={} get_vcpu_context domid={}",
self.handle.as_raw_fd(),
domid,
);
let mut wrapper = VcpuGuestContextAny {
value: VcpuGuestContext::default(),
};
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_GETVCPUCONTEXT,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
vcpu_context: DomCtlVcpuContext {
vcpu,
ctx: addr_of_mut!(wrapper) as c_ulong,
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { wrapper.value })
}
pub async fn set_vcpu_context(
&self,
domid: u32,
vcpu: u32,
context: &VcpuGuestContext,
mut context: VcpuGuestContextAny,
) -> Result<()> {
trace!(
"domctl fd={} set_vcpu_context domid={} context={:?}",
self.handle.as_raw_fd(),
domid,
context,
unsafe { context.value }
);
let mut value = VcpuGuestContextAny { value: *context };
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_SETVCPUCONTEXT,
interface_version: self.domctl_interface_version,
@ -499,7 +483,7 @@ impl XenCall {
value: DomCtlValue {
vcpu_context: DomCtlVcpuContext {
vcpu,
ctx: addr_of_mut!(value) as c_ulong,
ctx: addr_of_mut!(context) as c_ulong,
},
},
};
@ -569,26 +553,48 @@ impl XenCall {
Ok(())
}
pub async fn get_memory_map(&self, size_of_entry: usize) -> Result<Vec<u8>> {
pub async fn get_memory_map(&self, max_entries: u32) -> Result<Vec<E820Entry>> {
let mut memory_map = MemoryMap {
count: 0,
count: max_entries,
buffer: 0,
};
let mut entries = vec![E820Entry::default(); max_entries as usize];
memory_map.buffer = entries.as_mut_ptr() as c_ulong;
self.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_MEMORY_MAP as c_ulong,
addr_of_mut!(memory_map) as c_ulong,
)
.await?;
entries.truncate(memory_map.count as usize);
Ok(entries)
}
pub async fn set_memory_map(
&self,
domid: u32,
entries: Vec<E820Entry>,
) -> Result<Vec<E820Entry>> {
trace!(
"fd={} set_memory_map domid={} entries={:?}",
self.handle.as_raw_fd(),
domid,
entries
);
let mut memory_map = ForeignMemoryMap {
domid: domid as u16,
map: MemoryMap {
count: entries.len() as u32,
buffer: entries.as_ptr() as u64,
},
};
self.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_MEMORY_MAP as c_ulong,
XEN_MEM_SET_MEMORY_MAP as c_ulong,
addr_of_mut!(memory_map) as c_ulong,
)
.await?;
let mut buffer = vec![0u8; memory_map.count as usize * size_of_entry];
memory_map.buffer = buffer.as_mut_ptr() as c_ulong;
self.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_MEMORY_MAP as c_ulong,
addr_of_mut!(memory_map) as c_ulong,
)
.await?;
Ok(buffer)
Ok(entries)
}
pub async fn populate_physmap(
@ -611,24 +617,14 @@ impl XenCall {
domid: domid as u16,
};
let calls = &mut [MultiCallEntry {
op: HYPERVISOR_MEMORY_OP,
result: 0,
args: [
let code = self
.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_POPULATE_PHYSMAP as c_ulong,
addr_of_mut!(reservation) as c_ulong,
0,
0,
0,
0,
],
}];
self.multicall(calls).await?;
let code = calls[0].result;
if code > !0xfff {
return Err(Error::PopulatePhysmapFailed);
}
if code as usize > extent_starts.len() {
)
.await?;
if code as usize != extent_starts.len() {
return Err(Error::PopulatePhysmapFailed);
}
let extents = extent_starts[0..code as usize].to_vec();
@ -658,6 +654,31 @@ impl XenCall {
Ok(())
}
pub async fn add_to_physmap(&self, domid: u32, space: u32, idx: u64, pfn: u64) -> Result<()> {
trace!(
"memory fd={} add_to_physmap domid={} space={} idx={} pfn={}",
self.handle.as_raw_fd(),
domid,
space,
idx,
pfn,
);
let mut add = AddToPhysmap {
domid: domid as u16,
size: 0,
space,
idx,
gpfn: pfn,
};
self.hypercall2(
HYPERVISOR_MEMORY_OP,
XEN_MEM_ADD_TO_PHYSMAP as c_ulong,
addr_of_mut!(add) as c_ulong,
)
.await?;
Ok(())
}
pub async fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
let mut ops = MmuExtOp { cmd, arg1, arg2 };
@ -671,4 +692,229 @@ impl XenCall {
.await
.map(|_| ())
}
pub async fn iomem_permission(
&self,
domid: u32,
first_mfn: u64,
nr_mfns: u64,
allow: bool,
) -> Result<()> {
trace!(
"domctl fd={} iomem_permission domid={} first_mfn={:#x}, nr_mfns={:#x} allow={}",
self.handle.as_raw_fd(),
domid,
first_mfn,
nr_mfns,
allow,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_IOMEM_PERMISSION,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
iomem_permission: IoMemPermission {
first_mfn,
nr_mfns,
allow: if allow { 1 } else { 0 },
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub async fn ioport_permission(
&self,
domid: u32,
first_port: u32,
nr_ports: u32,
allow: bool,
) -> Result<()> {
trace!(
"domctl fd={} ioport_permission domid={} first_port={:#x}, nr_ports={:#x} allow={}",
self.handle.as_raw_fd(),
domid,
first_port,
nr_ports,
allow,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_IOPORT_PERMISSION,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
ioport_permission: IoPortPermission {
first_port,
nr_ports,
allow: if allow { 1 } else { 0 },
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
pub async fn irq_permission(&self, domid: u32, irq: u32, allow: bool) -> Result<()> {
trace!(
"domctl fd={} irq_permission domid={} irq={} allow={}",
self.handle.as_raw_fd(),
domid,
irq,
allow,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_IRQ_PERMISSION,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
irq_permission: IrqPermission {
pirq: irq,
allow: if allow { 1 } else { 0 },
pad: [0; 3],
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
#[allow(clippy::field_reassign_with_default)]
pub async fn map_pirq(&self, domid: u32, index: isize, pirq: Option<u32>) -> Result<u32> {
trace!(
"physdev fd={} map_pirq domid={} index={} pirq={:?}",
self.handle.as_raw_fd(),
domid,
index,
pirq,
);
let mut physdev = PhysdevMapPirq::default();
physdev.domid = domid as u16;
physdev.typ = 0x1;
physdev.index = index as c_int;
physdev.pirq = pirq.map(|x| x as c_int).unwrap_or(index as c_int);
self.hypercall2(
HYPERVISOR_PHYSDEV_OP,
PHYSDEVOP_MAP_PIRQ,
addr_of_mut!(physdev) as c_ulong,
)
.await?;
Ok(physdev.pirq as u32)
}
pub async fn assign_device(&self, domid: u32, sbdf: u32, flags: u32) -> Result<()> {
trace!(
"domctl fd={} assign_device domid={} sbdf={} flags={}",
self.handle.as_raw_fd(),
domid,
sbdf,
flags,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_ASSIGN_DEVICE,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
assign_device: AssignDevice {
device: DOMCTL_DEV_PCI,
flags,
pci_assign_device: PciAssignDevice { sbdf, padding: 0 },
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
#[allow(clippy::field_reassign_with_default)]
pub async fn set_hvm_param(&self, domid: u32, index: u32, value: u64) -> Result<()> {
trace!(
"set_hvm_param fd={} domid={} index={} value={:?}",
self.handle.as_raw_fd(),
domid,
index,
value,
);
let mut param = HvmParam::default();
param.domid = domid as u16;
param.index = index;
param.value = value;
self.hypercall2(HYPERVISOR_HVM_OP, 0, addr_of_mut!(param) as c_ulong)
.await?;
Ok(())
}
pub async fn get_hvm_context(&self, domid: u32, buffer: Option<&mut [u8]>) -> Result<u32> {
trace!(
"domctl fd={} get_hvm_context domid={}",
self.handle.as_raw_fd(),
domid,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_GETHVMCONTEXT,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
hvm_context: HvmContext {
size: buffer.as_ref().map(|x| x.len()).unwrap_or(0) as u32,
buffer: buffer.map(|x| x.as_mut_ptr()).unwrap_or(null_mut()) as u64,
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { domctl.value.hvm_context.size })
}
pub async fn set_hvm_context(&self, domid: u32, buffer: &mut [u8]) -> Result<u32> {
trace!(
"domctl fd={} set_hvm_context domid={}",
self.handle.as_raw_fd(),
domid,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_SETHVMCONTEXT,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
hvm_context: HvmContext {
size: buffer.len() as u32,
buffer: buffer.as_ptr() as u64,
},
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(unsafe { domctl.value.hvm_context.size })
}
pub async fn set_paging_mempool_size(&self, domid: u32, size: u64) -> Result<()> {
trace!(
"domctl fd={} set_paging_mempool_size domid={} size={}",
self.handle.as_raw_fd(),
domid,
size,
);
let mut domctl = DomCtl {
cmd: XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE,
interface_version: self.domctl_interface_version,
domid,
value: DomCtlValue {
paging_mempool: PagingMempool { size },
},
};
self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
.await?;
Ok(())
}
}

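A hedged usage sketch (not from the diff) of the reworked memory-map API, which now returns typed E820Entry values instead of a raw byte buffer; it assumes a privileged dom0 environment with access to /dev/xen/privcmd.

use xencall::error::Result;
use xencall::sys::E820_MAX;
use xencall::XenCall;

#[tokio::main]
async fn main() -> Result<()> {
    let call = XenCall::open(0)?;
    let entries = call.get_memory_map(E820_MAX).await?;
    for entry in entries {
        // E820Entry is #[repr(C, packed)], so copy fields out before printing.
        let (addr, size, typ) = (entry.addr, entry.size, entry.typ);
        println!("addr={:#x} size={:#x} typ={}", addr, size, typ);
    }
    Ok(())
}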
View File

@ -1,7 +1,6 @@
/// Handwritten hypercall bindings.
use nix::ioctl_readwrite_bad;
use std::ffi::{c_char, c_int, c_uint, c_ulong};
use uuid::Uuid;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
@ -35,8 +34,8 @@ pub struct MmapBatch {
pub num: u32,
pub domid: u16,
pub addr: u64,
pub mfns: *mut u64,
pub errors: *mut c_int,
pub mfns: u64,
pub errors: u64,
}
#[repr(C)]
@ -104,6 +103,7 @@ pub const XEN_DOMCTL_CDF_HAP: u32 = 1u32 << 1;
pub const XEN_DOMCTL_CDF_S3_INTEGRITY: u32 = 1u32 << 2;
pub const XEN_DOMCTL_CDF_OOS_OFF: u32 = 1u32 << 3;
pub const XEN_DOMCTL_CDF_XS_DOMAIN: u32 = 1u32 << 4;
pub const XEN_DOMCTL_CDF_IOMMU: u32 = 1u32 << 5;
pub const XEN_X86_EMU_LAPIC: u32 = 1 << 0;
pub const XEN_X86_EMU_HPET: u32 = 1 << 1;
@ -199,6 +199,7 @@ pub const XEN_DOMCTL_PSR_CAT_OP: u32 = 78;
pub const XEN_DOMCTL_SOFT_RESET: u32 = 79;
pub const XEN_DOMCTL_SET_GNTTAB_LIMITS: u32 = 80;
pub const XEN_DOMCTL_VUART_OP: u32 = 81;
pub const XEN_DOMCTL_SET_PAGING_MEMPOOL_SIZE: u32 = 86;
pub const XEN_DOMCTL_GDBSX_GUESTMEMIO: u32 = 1000;
pub const XEN_DOMCTL_GDBSX_PAUSEVCPU: u32 = 1001;
pub const XEN_DOMCTL_GDBSX_UNPAUSEVCPU: u32 = 1002;
@ -237,6 +238,12 @@ pub union DomCtlValue {
pub vcpu_context: DomCtlVcpuContext,
pub address_size: AddressSize,
pub get_page_frame_info: GetPageFrameInfo3,
pub ioport_permission: IoPortPermission,
pub iomem_permission: IoMemPermission,
pub irq_permission: IrqPermission,
pub assign_device: AssignDevice,
pub hvm_context: HvmContext,
pub paging_mempool: PagingMempool,
pub pad: [u8; 128],
}
@ -261,11 +268,8 @@ impl Default for CreateDomain {
fn default() -> Self {
CreateDomain {
ssidref: SECINITSID_DOMU,
handle: Uuid::new_v4().into_bytes(),
#[cfg(target_arch = "x86_64")]
handle: [0; 16],
flags: 0,
#[cfg(target_arch = "aarch64")]
flags: 1 << XEN_DOMCTL_CDF_HVM_GUEST,
iommu_opts: 0,
max_vcpus: 1,
max_evtchn_port: 1023,
@ -309,6 +313,30 @@ pub struct GetPageFrameInfo3 {
pub array: c_ulong,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct IoPortPermission {
pub first_port: u32,
pub nr_ports: u32,
pub allow: u8,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct IoMemPermission {
pub first_mfn: u64,
pub nr_mfns: u64,
pub allow: u8,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct IrqPermission {
pub pirq: u32,
pub allow: u8,
pub pad: [u8; 3],
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "x86_64")]
@ -317,6 +345,8 @@ pub struct ArchDomainConfig {
pub misc_flags: u32,
}
pub const X86_EMU_LAPIC: u32 = 1 << 0;
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "aarch64")]
@ -369,6 +399,16 @@ pub struct MemoryReservation {
pub domid: u16,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct AddToPhysmap {
pub domid: u16,
pub size: u16,
pub space: u32,
pub idx: u64,
pub gpfn: u64,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct MultiCallEntry {
@ -378,8 +418,10 @@ pub struct MultiCallEntry {
}
pub const XEN_MEM_POPULATE_PHYSMAP: u32 = 6;
pub const XEN_MEM_MEMORY_MAP: u32 = 9;
pub const XEN_MEM_MEMORY_MAP: u32 = 10;
pub const XEN_MEM_SET_MEMORY_MAP: u32 = 13;
pub const XEN_MEM_CLAIM_PAGES: u32 = 24;
pub const XEN_MEM_ADD_TO_PHYSMAP: u32 = 7;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
@ -388,6 +430,13 @@ pub struct MemoryMap {
pub buffer: c_ulong,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ForeignMemoryMap {
pub domid: u16,
pub map: MemoryMap,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct VcpuGuestContextFpuCtx {
@ -402,8 +451,8 @@ impl Default for VcpuGuestContextFpuCtx {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "x86_64")]
pub struct CpuUserRegs {
#[allow(non_camel_case_types)]
pub struct x8664CpuUserRegs {
pub r15: u64,
pub r14: u64,
pub r13: u64,
@ -442,7 +491,6 @@ pub struct CpuUserRegs {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "x86_64")]
pub struct TrapInfo {
pub vector: u8,
pub flags: u8,
@ -452,11 +500,11 @@ pub struct TrapInfo {
#[repr(C)]
#[derive(Copy, Clone, Debug)]
#[cfg(target_arch = "x86_64")]
pub struct VcpuGuestContext {
#[allow(non_camel_case_types)]
pub struct x8664VcpuGuestContext {
pub fpu_ctx: VcpuGuestContextFpuCtx,
pub flags: u64,
pub user_regs: CpuUserRegs,
pub user_regs: x8664CpuUserRegs,
pub trap_ctx: [TrapInfo; 256],
pub ldt_base: u64,
pub ldt_ents: u64,
@ -475,10 +523,9 @@ pub struct VcpuGuestContext {
pub gs_base_user: u64,
}
#[cfg(target_arch = "x86_64")]
impl Default for VcpuGuestContext {
impl Default for x8664VcpuGuestContext {
fn default() -> Self {
VcpuGuestContext {
Self {
fpu_ctx: Default::default(),
flags: 0,
user_regs: Default::default(),
@ -504,8 +551,7 @@ impl Default for VcpuGuestContext {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "aarch64")]
pub struct CpuUserRegs {
pub struct Arm64CpuUserRegs {
pub x0: u64,
pub x1: u64,
pub x2: u64,
@ -551,10 +597,9 @@ pub struct CpuUserRegs {
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
#[cfg(target_arch = "aarch64")]
pub struct VcpuGuestContext {
pub struct Arm64VcpuGuestContext {
pub flags: u32,
pub user_regs: CpuUserRegs,
pub user_regs: Arm64CpuUserRegs,
pub sctlr: u64,
pub ttbcr: u64,
pub ttbr0: u64,
@ -562,7 +607,10 @@ pub struct VcpuGuestContext {
}
pub union VcpuGuestContextAny {
pub value: VcpuGuestContext,
#[cfg(target_arch = "aarch64")]
pub value: Arm64VcpuGuestContext,
#[cfg(target_arch = "x86_64")]
pub value: x8664VcpuGuestContext,
}
#[repr(C)]
@ -582,3 +630,85 @@ pub struct EvtChnAllocUnbound {
pub remote_dom: u16,
pub port: u32,
}
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, Default)]
pub struct E820Entry {
pub addr: u64,
pub size: u64,
pub typ: u32,
}
pub const E820_MAX: u32 = 1024;
pub const E820_RAM: u32 = 1;
pub const E820_RESERVED: u32 = 2;
pub const E820_ACPI: u32 = 3;
pub const E820_NVS: u32 = 4;
pub const E820_UNUSABLE: u32 = 5;
pub const PHYSDEVOP_MAP_PIRQ: u64 = 13;
#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
pub struct PhysdevMapPirq {
pub domid: u16,
pub typ: c_int,
pub index: c_int,
pub pirq: c_int,
pub bus: c_int,
pub devfn: c_int,
pub entry_nr: u16,
pub table_base: u64,
}
pub const DOMCTL_DEV_RDM_RELAXED: u32 = 1;
pub const DOMCTL_DEV_PCI: u32 = 0;
pub const DOMCTL_DEV_DT: u32 = 1;
#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
pub struct PciAssignDevice {
pub sbdf: u32,
pub padding: u64,
}
#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
pub struct AssignDevice {
pub device: u32,
pub flags: u32,
pub pci_assign_device: PciAssignDevice,
}
pub const DOMID_IO: u32 = 0x7FF1;
pub const MEMFLAGS_POPULATE_ON_DEMAND: u32 = 1 << 16;
pub struct PodTarget {
pub target_pages: u64,
pub total_pages: u64,
pub pod_cache_pages: u64,
pub pod_entries: u64,
pub domid: u16,
}
#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
pub struct HvmParam {
pub domid: u16,
pub pad: u8,
pub index: u32,
pub value: u64,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct HvmContext {
pub size: u32,
pub buffer: u64,
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct PagingMempool {
pub size: u64,
}

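A hedged sketch of how these new types combine with the set_memory_map call added above; the single RAM entry and its size are illustrative values, not taken from the diff.

use xencall::error::Result;
use xencall::sys::{E820Entry, E820_RAM};
use xencall::XenCall;

// Build a minimal one-entry E820 map describing 512 MiB of RAM at
// guest-physical address 0 and hand it to set_memory_map for `domid`.
async fn set_minimal_e820(call: &XenCall, domid: u32) -> Result<()> {
    let entries = vec![E820Entry {
        addr: 0,
        size: 512 << 20, // 512 MiB
        typ: E820_RAM,
    }];
    call.set_memory_map(domid, entries).await?;
    Ok(())
}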
View File

@ -1,6 +1,6 @@
[package]
name = "krata-xenclient"
description = "An implementation of Xen userspace for krata."
description = "An implementation of Xen userspace for krata"
license.workspace = true
version.workspace = true
homepage.workspace = true
@ -10,19 +10,16 @@ resolver = "2"
[dependencies]
async-trait = { workspace = true }
elf = { workspace = true }
flate2 = { workspace = true }
indexmap = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.9" }
krata-xenstore = { path = "../xenstore", version = "^0.0.9" }
memchr = { workspace = true }
nix = { workspace = true }
slice-copy = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.11" }
krata-xenplatform = { path = "../xenplatform", version = "^0.0.11" }
krata-xenstore = { path = "../xenstore", version = "^0.0.11" }
regex = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }
xz2 = { workspace = true }
[dev-dependencies]
env_logger = { workspace = true }
@ -34,3 +31,7 @@ name = "xenclient"
[[example]]
name = "xenclient-boot"
path = "examples/boot.rs"
[[example]]
name = "xenclient-pci"
path = "examples/pci.rs"

View File

@ -1,6 +1,15 @@
use std::{env, process};
use tokio::fs;
use uuid::Uuid;
use xenclient::error::Result;
use xenclient::{DomainConfig, XenClient};
use xenplatform::domain::BaseDomainConfig;
#[cfg(target_arch = "x86_64")]
type RuntimePlatform = xenplatform::x86pv::X86PvPlatform;
#[cfg(not(target_arch = "x86_64"))]
type RuntimePlatform = xenplatform::unsupported::UnsupportedPlatform;
#[tokio::main]
async fn main() -> Result<()> {
@ -13,23 +22,28 @@ async fn main() -> Result<()> {
}
let kernel_image_path = args.get(1).expect("argument not specified");
let initrd_path = args.get(2).expect("argument not specified");
let client = XenClient::open(0).await?;
let client = XenClient::new(0, RuntimePlatform::new()).await?;
let config = DomainConfig {
base: BaseDomainConfig {
uuid: Uuid::new_v4(),
max_vcpus: 1,
mem_mb: 512,
enable_iommu: true,
kernel: fs::read(&kernel_image_path).await?,
initrd: fs::read(&initrd_path).await?,
cmdline: "earlyprintk=xen earlycon=xen console=hvc0 init=/init".to_string(),
owner_domid: 0,
},
backend_domid: 0,
name: "xenclient-test",
max_vcpus: 1,
mem_mb: 512,
kernel_path: kernel_image_path.as_str(),
initrd_path: initrd_path.as_str(),
cmdline: "debug elevator=noop",
use_console_backend: None,
name: "xenclient-test".to_string(),
swap_console_backend: None,
disks: vec![],
channels: vec![],
vifs: vec![],
pcis: vec![],
filesystems: vec![],
extra_keys: vec![],
extra_rw_paths: vec![],
event_channels: vec![],
};
let created = client.create(&config).await?;
println!("created domain {}", created.domid);

View File

@ -0,0 +1,32 @@
use xenclient::pci::*;
use xenclient::error::Result;
#[tokio::main]
async fn main() -> Result<()> {
let backend = XenPciBackend::new();
if !backend.is_loaded().await? {
return Err(xenclient::error::Error::GenericError(
"xen-pciback module not loaded".to_string(),
));
}
println!("assignable devices:");
for device in backend.list_devices().await? {
let is_assigned = backend.is_assigned(&device).await?;
let has_slot = backend.has_slot(&device).await?;
println!("{} slot={} assigned={}", device, has_slot, is_assigned);
let resources = backend.read_resources(&device).await?;
for resource in resources {
println!(
" resource start={:#x} end={:#x} flags={:#x} bar-io={}",
resource.start,
resource.end,
resource.flags,
resource.is_bar_io()
);
}
}
Ok(())
}

View File

@ -1,288 +0,0 @@
use crate::boot::{ArchBootSetup, BootImageInfo, BootSetup, BootState, DomainSegment};
use crate::error::Result;
use crate::sys::XEN_PAGE_SHIFT;
use crate::Error;
use log::trace;
use xencall::sys::VcpuGuestContext;
pub const ARM_PAGE_SHIFT: u64 = 12;
const ARM_PAGE_SIZE: u64 = 1 << ARM_PAGE_SHIFT;
const GUEST_RAM0_BASE: u64 = 0x40000000;
const GUEST_RAM0_SIZE: u64 = 0xc0000000;
const GUEST_RAM1_BASE: u64 = 0x0200000000;
const GUEST_RAM1_SIZE: u64 = 0xfe00000000;
const GUEST_RAM_BANK_BASES: [u64; 2] = [GUEST_RAM0_BASE, GUEST_RAM1_BASE];
const GUEST_RAM_BANK_SIZES: [u64; 2] = [GUEST_RAM0_SIZE, GUEST_RAM1_SIZE];
const LPAE_SHIFT: u64 = 9;
const PFN_4K_SHIFT: u64 = 0;
const PFN_2M_SHIFT: u64 = PFN_4K_SHIFT + LPAE_SHIFT;
const PFN_1G_SHIFT: u64 = PFN_2M_SHIFT + LPAE_SHIFT;
const PFN_512G_SHIFT: u64 = PFN_1G_SHIFT + LPAE_SHIFT;
const PSR_FIQ_MASK: u64 = 1 << 6; /* Fast Interrupt mask */
const PSR_IRQ_MASK: u64 = 1 << 7; /* Interrupt mask */
const PSR_ABT_MASK: u64 = 1 << 8; /* Asynchronous Abort mask */
const PSR_MODE_EL1H: u64 = 0x05;
const PSR_GUEST64_INIT: u64 = PSR_ABT_MASK | PSR_FIQ_MASK | PSR_IRQ_MASK | PSR_MODE_EL1H;
pub struct Arm64BootSetup {}
impl Default for Arm64BootSetup {
fn default() -> Self {
Self::new()
}
}
impl Arm64BootSetup {
pub fn new() -> Arm64BootSetup {
Arm64BootSetup {}
}
async fn populate_one_size(
&self,
setup: &mut BootSetup<'_>,
pfn_shift: u64,
base_pfn: u64,
pfn_count: u64,
extents: &mut [u64],
) -> Result<u64> {
let mask = (1u64 << pfn_shift) - 1;
let next_shift = pfn_shift + LPAE_SHIFT;
let next_mask = (1u64 << next_shift) - 1;
let next_boundary = (base_pfn + (1 << next_shift)) - 1;
let mut end_pfn = base_pfn + pfn_count;
if pfn_shift == PFN_512G_SHIFT {
return Ok(0);
}
if (base_pfn & next_mask) != 0 && end_pfn > next_boundary {
end_pfn = next_boundary;
}
if (mask & base_pfn) != 0 {
return Ok(0);
}
let count = (end_pfn - base_pfn) >> pfn_shift;
if count == 0 {
return Ok(0);
}
for i in 0..count {
extents[i as usize] = base_pfn + (i << pfn_shift);
}
let result_extents = setup
.call
.populate_physmap(
setup.domid,
count,
pfn_shift as u32,
0,
&extents[0usize..count as usize],
)
.await?;
slice_copy::copy(extents, &result_extents);
Ok((result_extents.len() as u64) << pfn_shift)
}
async fn populate_guest_memory(
&mut self,
setup: &mut BootSetup<'_>,
base_pfn: u64,
pfn_count: u64,
) -> Result<()> {
let mut extents = vec![0u64; 1024 * 1024];
for pfn in 0..extents.len() {
let mut allocsz = (1024 * 1024).min(pfn_count - pfn as u64);
allocsz = self
.populate_one_size(
setup,
PFN_512G_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)
.await?;
if allocsz > 0 {
continue;
}
allocsz = self
.populate_one_size(
setup,
PFN_1G_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)
.await?;
if allocsz > 0 {
continue;
}
allocsz = self
.populate_one_size(
setup,
PFN_2M_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)
.await?;
if allocsz > 0 {
continue;
}
allocsz = self
.populate_one_size(
setup,
PFN_4K_SHIFT,
base_pfn + pfn as u64,
allocsz,
&mut extents,
)
.await?;
if allocsz == 0 {
return Err(Error::MemorySetupFailed("allocsz is zero"));
}
}
Ok(())
}
}
#[async_trait::async_trait]
impl ArchBootSetup for Arm64BootSetup {
fn page_size(&mut self) -> u64 {
ARM_PAGE_SIZE
}
fn page_shift(&mut self) -> u64 {
ARM_PAGE_SHIFT
}
fn needs_early_kernel(&mut self) -> bool {
true
}
async fn setup_shared_info(&mut self, _: &mut BootSetup, _: u64) -> Result<()> {
Ok(())
}
async fn setup_start_info(&mut self, _: &mut BootSetup, _: &BootState, _: &str) -> Result<()> {
Ok(())
}
async fn meminit(
&mut self,
setup: &mut BootSetup,
total_pages: u64,
kernel_segment: &Option<DomainSegment>,
initrd_segment: &Option<DomainSegment>,
) -> Result<()> {
let kernel_segment = kernel_segment
.as_ref()
.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
setup.call.claim_pages(setup.domid, total_pages).await?;
let mut ramsize = total_pages << XEN_PAGE_SHIFT;
let bankbase = GUEST_RAM_BANK_BASES;
let bankmax = GUEST_RAM_BANK_SIZES;
let kernbase = kernel_segment.vstart;
let kernend = BootSetup::round_up(kernel_segment.size, 21);
let dtb = setup.dtb.as_ref();
let dtb_size = dtb.map(|blob| BootSetup::round_up(blob.len() as u64, XEN_PAGE_SHIFT));
let ramdisk_size = initrd_segment
.as_ref()
.map(|segment| BootSetup::round_up(segment.size, XEN_PAGE_SHIFT));
let modsize = dtb_size.unwrap_or(0) + ramdisk_size.unwrap_or(0);
let ram128mb = bankbase[0] + (128 << 20);
let mut rambank_size: [u64; 2] = [0, 0];
for i in 0..2 {
let size = if ramsize > bankmax[i] {
bankmax[i]
} else {
ramsize
};
ramsize -= size;
rambank_size[i] = size >> XEN_PAGE_SHIFT;
}
for i in 0..2 {
self.populate_guest_memory(setup, bankbase[i] >> XEN_PAGE_SHIFT, rambank_size[i])
.await?;
}
let bank0end = bankbase[0] + (rambank_size[0] << XEN_PAGE_SHIFT);
let _modbase = if bank0end >= ram128mb + modsize && kernend < ram128mb {
ram128mb
} else if bank0end - modsize > kernend {
bank0end - modsize
} else if kernbase - bankbase[0] > modsize {
kernbase - modsize
} else {
return Err(Error::MemorySetupFailed("unable to determine modbase"));
};
setup.call.claim_pages(setup.domid, 0).await?;
Ok(())
}
async fn bootlate(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
Ok(())
}
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()> {
let mut vcpu = VcpuGuestContext::default();
vcpu.user_regs.pc = state.image_info.virt_entry;
vcpu.user_regs.x0 = 0xffffffff;
vcpu.user_regs.x1 = 0;
vcpu.user_regs.x2 = 0;
vcpu.user_regs.x3 = 0;
vcpu.sctlr = 0x00c50078;
vcpu.ttbr0 = 0;
vcpu.ttbr1 = 0;
vcpu.ttbcr = 0;
vcpu.user_regs.cpsr = PSR_GUEST64_INIT;
vcpu.flags = 1 << 0; // VGCF_ONLINE
trace!("vcpu context: {:?}", vcpu);
setup.call.set_vcpu_context(setup.domid, 0, &vcpu).await?;
Ok(())
}
async fn alloc_p2m_segment(
&mut self,
_: &mut BootSetup,
_: &BootImageInfo,
) -> Result<Option<DomainSegment>> {
Ok(None)
}
async fn alloc_page_tables(
&mut self,
_: &mut BootSetup,
_: &BootImageInfo,
) -> Result<Option<DomainSegment>> {
Ok(None)
}
async fn setup_page_tables(&mut self, _: &mut BootSetup, _: &mut BootState) -> Result<()> {
Ok(())
}
}

View File

@ -1,422 +0,0 @@
use crate::error::Result;
use crate::mem::PhysicalPages;
use crate::sys::{GrantEntry, XEN_PAGE_SHIFT};
use crate::Error;
use libc::munmap;
use log::debug;
use nix::errno::Errno;
use slice_copy::copy;
use crate::mem::ARCH_PAGE_SHIFT;
use std::ffi::c_void;
use std::slice;
use xencall::XenCall;
pub trait BootImageLoader {
fn parse(&self) -> Result<BootImageInfo>;
fn load(&self, image_info: &BootImageInfo, dst: &mut [u8]) -> Result<()>;
}
pub const XEN_UNSET_ADDR: u64 = -1i64 as u64;
#[derive(Debug)]
pub struct BootImageInfo {
pub start: u64,
pub virt_base: u64,
pub virt_kstart: u64,
pub virt_kend: u64,
pub virt_hypercall: u64,
pub virt_entry: u64,
pub virt_p2m_base: u64,
pub unmapped_initrd: bool,
}
pub struct BootSetup<'a> {
pub(crate) call: &'a XenCall,
pub phys: PhysicalPages<'a>,
pub(crate) domid: u32,
pub(crate) virt_alloc_end: u64,
pub(crate) pfn_alloc_end: u64,
pub(crate) virt_pgtab_end: u64,
pub(crate) total_pages: u64,
#[cfg(target_arch = "aarch64")]
pub(crate) dtb: Option<Vec<u8>>,
}
#[derive(Debug)]
pub struct DomainSegment {
pub(crate) vstart: u64,
vend: u64,
pub pfn: u64,
pub(crate) addr: u64,
pub(crate) size: u64,
#[cfg(target_arch = "x86_64")]
pub(crate) pages: u64,
}
#[derive(Debug)]
pub struct BootState {
pub kernel_segment: DomainSegment,
pub start_info_segment: DomainSegment,
pub xenstore_segment: DomainSegment,
pub boot_stack_segment: DomainSegment,
pub p2m_segment: Option<DomainSegment>,
pub page_table_segment: Option<DomainSegment>,
pub image_info: BootImageInfo,
pub shared_info_frame: u64,
pub initrd_segment: DomainSegment,
pub store_evtchn: u32,
pub consoles: Vec<(u32, DomainSegment)>,
}
impl BootSetup<'_> {
pub fn new(call: &XenCall, domid: u32) -> BootSetup {
BootSetup {
call,
phys: PhysicalPages::new(call, domid),
domid,
virt_alloc_end: 0,
pfn_alloc_end: 0,
virt_pgtab_end: 0,
total_pages: 0,
#[cfg(target_arch = "aarch64")]
dtb: None,
}
}
async fn initialize_memory(
&mut self,
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
total_pages: u64,
kernel_segment: &Option<DomainSegment>,
initrd_segment: &Option<DomainSegment>,
) -> Result<()> {
self.call.set_address_size(self.domid, 64).await?;
arch.meminit(self, total_pages, kernel_segment, initrd_segment)
.await?;
Ok(())
}
async fn setup_hypercall_page(&mut self, image_info: &BootImageInfo) -> Result<()> {
if image_info.virt_hypercall == XEN_UNSET_ADDR {
return Ok(());
}
let pfn = (image_info.virt_hypercall - image_info.virt_base) >> ARCH_PAGE_SHIFT;
let mfn = self.phys.p2m[pfn as usize];
self.call.hypercall_init(self.domid, mfn).await?;
Ok(())
}
pub async fn initialize<I: BootImageLoader + Send + Sync>(
&mut self,
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
image_loader: &I,
initrd: &[u8],
max_vcpus: u32,
mem_mb: u64,
console_count: usize,
) -> Result<BootState> {
debug!("initialize max_vcpus={:?} mem_mb={:?}", max_vcpus, mem_mb);
let page_size = arch.page_size();
let image_info = image_loader.parse()?;
debug!("initialize image_info={:?}", image_info);
let mut kernel_segment: Option<DomainSegment> = None;
let mut initrd_segment: Option<DomainSegment> = None;
if !image_info.unmapped_initrd {
initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
}
if arch.needs_early_kernel() {
kernel_segment = Some(
self.load_kernel_segment(page_size, image_loader, &image_info)
.await?,
);
}
let total_pages = mem_mb << (20 - arch.page_shift());
self.initialize_memory(arch, total_pages, &kernel_segment, &initrd_segment)
.await?;
self.virt_alloc_end = image_info.virt_base;
if kernel_segment.is_none() {
kernel_segment = Some(
self.load_kernel_segment(page_size, image_loader, &image_info)
.await?,
);
}
let mut p2m_segment: Option<DomainSegment> = None;
if image_info.virt_p2m_base >= image_info.virt_base
|| (image_info.virt_p2m_base & ((1 << arch.page_shift()) - 1)) != 0
{
p2m_segment = arch.alloc_p2m_segment(self, &image_info).await?;
}
let start_info_segment = self.alloc_page(page_size)?;
let xenstore_segment = self.alloc_page(page_size)?;
let mut consoles: Vec<(u32, DomainSegment)> = Vec::new();
for _ in 0..console_count {
let evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;
let page = self.alloc_page(page_size)?;
consoles.push((evtchn, page));
}
let page_table_segment = arch.alloc_page_tables(self, &image_info).await?;
let boot_stack_segment = self.alloc_page(page_size)?;
if self.virt_pgtab_end > 0 {
self.alloc_padding_pages(page_size, self.virt_pgtab_end)?;
}
if p2m_segment.is_none() {
if let Some(mut segment) = arch.alloc_p2m_segment(self, &image_info).await? {
segment.vstart = image_info.virt_p2m_base;
p2m_segment = Some(segment);
}
}
if image_info.unmapped_initrd {
initrd_segment = Some(self.alloc_module(page_size, initrd).await?);
}
let initrd_segment = initrd_segment.unwrap();
let store_evtchn = self.call.evtchn_alloc_unbound(self.domid, 0).await?;
let kernel_segment =
kernel_segment.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
let state = BootState {
kernel_segment,
start_info_segment,
xenstore_segment,
consoles,
boot_stack_segment,
p2m_segment,
page_table_segment,
image_info,
initrd_segment,
store_evtchn,
shared_info_frame: 0,
};
debug!("initialize state={:?}", state);
Ok(state)
}
pub async fn boot(
&mut self,
arch: &mut Box<dyn ArchBootSetup + Send + Sync>,
state: &mut BootState,
cmdline: &str,
) -> Result<()> {
let domain_info = self.call.get_domain_info(self.domid).await?;
let shared_info_frame = domain_info.shared_info_frame;
state.shared_info_frame = shared_info_frame;
arch.setup_page_tables(self, state).await?;
arch.setup_start_info(self, state, cmdline).await?;
self.setup_hypercall_page(&state.image_info).await?;
arch.bootlate(self, state).await?;
arch.setup_shared_info(self, state.shared_info_frame)
.await?;
arch.vcpu(self, state).await?;
self.phys.unmap_all()?;
self.gnttab_seed(state).await?;
Ok(())
}
async fn gnttab_seed(&mut self, state: &mut BootState) -> Result<()> {
let console_gfn =
self.phys.p2m[state.consoles.first().map(|x| x.1.pfn).unwrap_or(0) as usize];
let xenstore_gfn = self.phys.p2m[state.xenstore_segment.pfn as usize];
let addr = self
.call
.mmap(0, 1 << XEN_PAGE_SHIFT)
.await
.ok_or(Error::MmapFailed)?;
self.call.map_resource(self.domid, 1, 0, 0, 1, addr).await?;
let entries = unsafe { slice::from_raw_parts_mut(addr as *mut GrantEntry, 2) };
entries[0].flags = 1 << 0;
entries[0].domid = 0;
entries[0].frame = console_gfn as u32;
entries[1].flags = 1 << 0;
entries[1].domid = 0;
entries[1].frame = xenstore_gfn as u32;
unsafe {
let result = munmap(addr as *mut c_void, 1 << XEN_PAGE_SHIFT);
if result != 0 {
return Err(Error::UnmapFailed(Errno::from_raw(result)));
}
}
Ok(())
}
async fn load_kernel_segment<I: BootImageLoader + Send + Sync>(
&mut self,
page_size: u64,
image_loader: &I,
image_info: &BootImageInfo,
) -> Result<DomainSegment> {
let kernel_segment = self
.alloc_segment(
page_size,
image_info.virt_kstart,
image_info.virt_kend - image_info.virt_kstart,
)
.await?;
let kernel_segment_ptr = kernel_segment.addr as *mut u8;
let kernel_segment_slice =
unsafe { slice::from_raw_parts_mut(kernel_segment_ptr, kernel_segment.size as usize) };
image_loader.load(image_info, kernel_segment_slice)?;
Ok(kernel_segment)
}
pub(crate) fn round_up(addr: u64, mask: u64) -> u64 {
addr | mask
}
#[cfg(target_arch = "x86_64")]
pub(crate) fn bits_to_mask(bits: u64) -> u64 {
(1 << bits) - 1
}
pub(crate) async fn alloc_segment(
&mut self,
page_size: u64,
start: u64,
size: u64,
) -> Result<DomainSegment> {
debug!("alloc_segment {:#x} {:#x}", start, size);
if start > 0 {
self.alloc_padding_pages(page_size, start)?;
}
let local_page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
let pages = (size + local_page_size as u64 - 1) / local_page_size as u64;
let start = self.virt_alloc_end;
let mut segment = DomainSegment {
vstart: start,
vend: 0,
pfn: self.pfn_alloc_end,
addr: 0,
size,
#[cfg(target_arch = "x86_64")]
pages,
};
self.chk_alloc_pages(page_size, pages)?;
let ptr = self.phys.pfn_to_ptr(segment.pfn, pages).await?;
segment.addr = ptr;
let slice = unsafe {
slice::from_raw_parts_mut(ptr as *mut u8, (pages * local_page_size as u64) as usize)
};
slice.fill(0);
segment.vend = self.virt_alloc_end;
debug!(
"alloc_segment {:#x} -> {:#x} (pfn {:#x} + {:#x} pages)",
start, segment.vend, segment.pfn, pages
);
Ok(segment)
}
fn alloc_page(&mut self, page_size: u64) -> Result<DomainSegment> {
let start = self.virt_alloc_end;
let pfn = self.pfn_alloc_end;
self.chk_alloc_pages(page_size, 1)?;
debug!("alloc_page {:#x} (pfn {:#x})", start, pfn);
Ok(DomainSegment {
vstart: start,
vend: (start + page_size) - 1,
pfn,
addr: 0,
size: 0,
#[cfg(target_arch = "x86_64")]
pages: 1,
})
}
async fn alloc_module(&mut self, page_size: u64, buffer: &[u8]) -> Result<DomainSegment> {
let segment = self
.alloc_segment(page_size, 0, buffer.len() as u64)
.await?;
let slice = unsafe { slice::from_raw_parts_mut(segment.addr as *mut u8, buffer.len()) };
copy(slice, buffer);
Ok(segment)
}
fn alloc_padding_pages(&mut self, page_size: u64, boundary: u64) -> Result<()> {
if (boundary & (page_size - 1)) != 0 {
return Err(Error::MemorySetupFailed("boundary is incorrect"));
}
if boundary < self.virt_alloc_end {
return Err(Error::MemorySetupFailed("boundary is below allocation end"));
}
let pages = (boundary - self.virt_alloc_end) / page_size;
self.chk_alloc_pages(page_size, pages)?;
Ok(())
}
fn chk_alloc_pages(&mut self, page_size: u64, pages: u64) -> Result<()> {
if pages > self.total_pages
|| self.pfn_alloc_end > self.total_pages
|| pages > self.total_pages - self.pfn_alloc_end
{
return Err(Error::MemorySetupFailed("no more pages left"));
}
self.pfn_alloc_end += pages;
self.virt_alloc_end += pages * page_size;
Ok(())
}
}
#[async_trait::async_trait]
pub trait ArchBootSetup {
fn page_size(&mut self) -> u64;
fn page_shift(&mut self) -> u64;
fn needs_early_kernel(&mut self) -> bool;
async fn alloc_p2m_segment(
&mut self,
setup: &mut BootSetup,
image_info: &BootImageInfo,
) -> Result<Option<DomainSegment>>;
async fn alloc_page_tables(
&mut self,
setup: &mut BootSetup,
image_info: &BootImageInfo,
) -> Result<Option<DomainSegment>>;
async fn setup_page_tables(
&mut self,
setup: &mut BootSetup,
state: &mut BootState,
) -> Result<()>;
async fn setup_start_info(
&mut self,
setup: &mut BootSetup,
state: &BootState,
cmdline: &str,
) -> Result<()>;
async fn setup_shared_info(
&mut self,
setup: &mut BootSetup,
shared_info_frame: u64,
) -> Result<()>;
async fn meminit(
&mut self,
setup: &mut BootSetup,
total_pages: u64,
kernel_segment: &Option<DomainSegment>,
initrd_segment: &Option<DomainSegment>,
) -> Result<()>;
async fn bootlate(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
async fn vcpu(&mut self, setup: &mut BootSetup, state: &mut BootState) -> Result<()>;
}

View File

@ -1,5 +1,7 @@
use std::io;
use crate::pci::PciBdf;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("io issue encountered: {0}")]
@ -18,12 +20,6 @@ pub enum Error {
PathParentNotFound,
#[error("domain does not exist")]
DomainNonExistent,
#[error("elf parse failed: {0}")]
ElfParseFailed(#[from] elf::ParseError),
#[error("mmap failed")]
MmapFailed,
#[error("munmap failed: {0}")]
UnmapFailed(nix::errno::Errno),
#[error("memory setup failed: {0}")]
MemorySetupFailed(&'static str),
#[error("populate physmap failed: wanted={0}, received={1}, input_extents={2}")]
@ -34,6 +30,18 @@ pub enum Error {
ElfInvalidImage,
#[error("provided elf image does not contain xen support")]
ElfXenSupportMissing,
#[error("regex error: {0}")]
RegexError(#[from] regex::Error),
#[error("error: {0}")]
GenericError(String),
#[error("failed to parse int: {0}")]
ParseIntError(#[from] std::num::ParseIntError),
#[error("invalid pci bdf string")]
InvalidPciBdfString,
#[error("pci device {0} is not assignable")]
PciDeviceNotAssignable(PciBdf),
#[error("xen platform error: {0}")]
XenPlatform(#[from] xenplatform::error::Error),
}
pub type Result<T> = std::result::Result<T, Error>;

View File

@ -1,99 +1,107 @@
pub mod boot;
pub mod elfloader;
pub mod error;
pub mod mem;
pub mod sys;
#[cfg(target_arch = "x86_64")]
pub mod x86;
#[cfg(target_arch = "x86_64")]
use crate::x86::X86BootSetup;
#[cfg(target_arch = "aarch64")]
pub mod arm64;
#[cfg(target_arch = "aarch64")]
use crate::arm64::Arm64BootSetup;
use crate::boot::{ArchBootSetup, BootSetup};
use crate::elfloader::ElfImageLoader;
use crate::error::{Error, Result};
use boot::BootState;
use log::{debug, trace, warn};
use log::{debug, trace};
use pci::PciBdf;
use tokio::time::timeout;
use tx::ClientTransaction;
use xenplatform::boot::BootSetupPlatform;
use xenplatform::domain::{BaseDomainConfig, BaseDomainManager, CreatedDomain};
use std::fs::read;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use uuid::Uuid;
use xencall::sys::{CreateDomain, XEN_DOMCTL_CDF_HAP, XEN_DOMCTL_CDF_HVM_GUEST};
use xencall::XenCall;
use xenstore::{
XsPermission, XsdClient, XsdInterface, XS_PERM_NONE, XS_PERM_READ, XS_PERM_READ_WRITE,
};
use xenstore::{XsdClient, XsdInterface};
pub mod pci;
pub mod tx;
#[derive(Clone)]
pub struct XenClient {
pub struct XenClient<P: BootSetupPlatform> {
pub store: XsdClient,
call: XenCall,
domain_manager: Arc<BaseDomainManager<P>>,
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct BlockDeviceRef {
pub path: String,
pub major: u32,
pub minor: u32,
}
#[derive(Debug)]
pub struct DomainDisk<'a> {
pub vdev: &'a str,
pub block: &'a BlockDeviceRef,
#[derive(Clone, Debug)]
pub struct DomainDisk {
pub vdev: String,
pub block: BlockDeviceRef,
pub writable: bool,
}
#[derive(Debug)]
pub struct DomainFilesystem<'a> {
pub path: &'a str,
pub tag: &'a str,
#[derive(Clone, Debug)]
pub struct DomainFilesystem {
pub path: String,
pub tag: String,
}
#[derive(Debug)]
pub struct DomainNetworkInterface<'a> {
pub mac: &'a str,
#[derive(Clone, Debug)]
pub struct DomainNetworkInterface {
pub mac: String,
pub mtu: u32,
pub bridge: Option<&'a str>,
pub script: Option<&'a str>,
pub bridge: Option<String>,
pub script: Option<String>,
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct DomainChannel {
pub typ: String,
pub initialized: bool,
}
#[derive(Debug)]
pub struct DomainEventChannel<'a> {
pub name: &'a str,
#[derive(Clone, Debug)]
pub struct DomainEventChannel {
pub name: String,
}
#[derive(Debug)]
pub struct DomainConfig<'a> {
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub enum DomainPciRdmReservePolicy {
Invalid,
#[default]
Strict,
Relaxed,
}
impl DomainPciRdmReservePolicy {
pub fn to_option_str(&self) -> &str {
match self {
DomainPciRdmReservePolicy::Invalid => "-1",
DomainPciRdmReservePolicy::Strict => "0",
DomainPciRdmReservePolicy::Relaxed => "1",
}
}
}
#[derive(Clone, Debug)]
pub struct DomainPciDevice {
pub bdf: PciBdf,
pub permissive: bool,
pub msi_translate: bool,
pub power_management: bool,
pub rdm_reserve_policy: DomainPciRdmReservePolicy,
}
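A hedged sketch of filling in this struct for DomainConfig::pcis; parsing the BDF string assumes PciBdf implements FromStr, which the InvalidPciBdfString error variant suggests but this diff does not show.

use xenclient::pci::PciBdf;
use xenclient::{DomainPciDevice, DomainPciRdmReservePolicy};

fn example_pci_device() -> DomainPciDevice {
    // Hypothetical BDF string; the parse relies on an assumed FromStr impl.
    let bdf: PciBdf = "0000:03:00.0".parse().expect("valid pci bdf");
    DomainPciDevice {
        bdf,
        permissive: false,
        msi_translate: false,
        power_management: false,
        rdm_reserve_policy: DomainPciRdmReservePolicy::Strict,
    }
}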
#[derive(Clone, Debug)]
pub struct DomainConfig {
pub base: BaseDomainConfig,
pub backend_domid: u32,
pub name: &'a str,
pub max_vcpus: u32,
pub mem_mb: u64,
pub kernel_path: &'a str,
pub initrd_path: &'a str,
pub cmdline: &'a str,
pub disks: Vec<DomainDisk<'a>>,
pub use_console_backend: Option<&'a str>,
pub name: String,
pub disks: Vec<DomainDisk>,
pub swap_console_backend: Option<String>,
pub channels: Vec<DomainChannel>,
pub vifs: Vec<DomainNetworkInterface<'a>>,
pub filesystems: Vec<DomainFilesystem<'a>>,
pub event_channels: Vec<DomainEventChannel<'a>>,
pub vifs: Vec<DomainNetworkInterface>,
pub filesystems: Vec<DomainFilesystem>,
pub pcis: Vec<DomainPciDevice>,
pub extra_keys: Vec<(String, String)>,
pub extra_rw_paths: Vec<String>,
}
@ -104,628 +112,106 @@ pub struct CreatedChannel {
pub evtchn: u32,
}
#[derive(Debug)]
pub struct CreatedDomain {
pub domid: u32,
pub channels: Vec<CreatedChannel>,
}
impl XenClient {
pub async fn open(current_domid: u32) -> Result<XenClient> {
#[allow(clippy::too_many_arguments)]
impl<P: BootSetupPlatform> XenClient<P> {
pub async fn new(current_domid: u32, platform: P) -> Result<XenClient<P>> {
let store = XsdClient::open().await?;
let call = XenCall::open(current_domid)?;
Ok(XenClient { store, call })
let call: XenCall = XenCall::open(current_domid)?;
let domain_manager = BaseDomainManager::new(call.clone(), platform).await?;
Ok(XenClient {
store,
call,
domain_manager: Arc::new(domain_manager),
})
}
pub async fn create(&self, config: &DomainConfig<'_>) -> Result<CreatedDomain> {
let mut domain = CreateDomain {
max_vcpus: config.max_vcpus,
..Default::default()
};
if cfg!(target_arch = "aarch64") {
domain.flags = XEN_DOMCTL_CDF_HVM_GUEST | XEN_DOMCTL_CDF_HAP;
}
let domid = self.call.create_domain(domain).await?;
match self.init(domid, &domain, config).await {
Ok(created) => Ok(created),
pub async fn create(&self, config: &DomainConfig) -> Result<CreatedDomain> {
let created = self.domain_manager.create(config.base.clone()).await?;
match self.init(created.domid, config, &created).await {
Ok(_) => Ok(created),
Err(err) => {
// ignore since destroying a domain is best
// effort when an error occurs
let _ = self.destroy(domid).await;
let _ = self.domain_manager.destroy(created.domid).await;
Err(err)
}
}
}
async fn init(
&self,
domid: u32,
domain: &CreateDomain,
config: &DomainConfig<'_>,
) -> Result<CreatedDomain> {
trace!(
"XenClient init domid={} domain={:?} config={:?}",
domid,
domain,
config
);
let backend_dom_path = self.store.get_domain_path(0).await?;
let dom_path = self.store.get_domain_path(domid).await?;
let uuid_string = Uuid::from_bytes(domain.handle).to_string();
let vm_path = format!("/vm/{}", uuid_string);
pub async fn transaction(&self, domid: u32, backend_domid: u32) -> Result<ClientTransaction> {
ClientTransaction::new(&self.store, domid, backend_domid).await
}
let ro_perm = &[
XsPermission {
id: 0,
perms: XS_PERM_NONE,
},
XsPermission {
id: domid,
perms: XS_PERM_READ,
},
];
let rw_perm = &[XsPermission {
id: domid,
perms: XS_PERM_READ_WRITE,
}];
let no_perm = &[XsPermission {
id: 0,
perms: XS_PERM_NONE,
}];
{
let tx = self.store.transaction().await?;
tx.rm(dom_path.as_str()).await?;
tx.mknod(dom_path.as_str(), ro_perm).await?;
tx.rm(vm_path.as_str()).await?;
tx.mknod(vm_path.as_str(), ro_perm).await?;
tx.mknod(vm_path.as_str(), no_perm).await?;
tx.mknod(format!("{}/device", vm_path).as_str(), no_perm)
.await?;
tx.write_string(format!("{}/vm", dom_path).as_str(), &vm_path)
.await?;
tx.mknod(format!("{}/cpu", dom_path).as_str(), ro_perm)
.await?;
tx.mknod(format!("{}/memory", dom_path).as_str(), ro_perm)
.await?;
tx.mknod(format!("{}/control", dom_path).as_str(), ro_perm)
.await?;
tx.mknod(format!("{}/control/shutdown", dom_path).as_str(), rw_perm)
.await?;
tx.mknod(
format!("{}/control/feature-poweroff", dom_path).as_str(),
rw_perm,
)
async fn init(&self, domid: u32, config: &DomainConfig, created: &CreatedDomain) -> Result<()> {
trace!("xenclient init domid={} domain={:?}", domid, created);
let transaction = self.transaction(domid, config.backend_domid).await?;
transaction
.add_domain_declaration(&config.name, &config.base, created)
.await?;
tx.mknod(
format!("{}/control/feature-reboot", dom_path).as_str(),
rw_perm,
)
.await?;
tx.mknod(
format!("{}/control/feature-suspend", dom_path).as_str(),
rw_perm,
)
.await?;
tx.mknod(format!("{}/control/sysrq", dom_path).as_str(), rw_perm)
.await?;
tx.mknod(format!("{}/data", dom_path).as_str(), rw_perm)
.await?;
tx.mknod(format!("{}/drivers", dom_path).as_str(), rw_perm)
.await?;
tx.mknod(format!("{}/feature", dom_path).as_str(), rw_perm)
.await?;
tx.mknod(format!("{}/attr", dom_path).as_str(), rw_perm)
.await?;
tx.mknod(format!("{}/error", dom_path).as_str(), rw_perm)
.await?;
tx.write_string(
format!("{}/uuid", vm_path).as_str(),
&Uuid::from_bytes(domain.handle).to_string(),
)
.await?;
tx.write_string(format!("{}/name", dom_path).as_str(), config.name)
.await?;
tx.write_string(format!("{}/name", vm_path).as_str(), config.name)
.await?;
for (key, value) in &config.extra_keys {
tx.write_string(format!("{}/{}", dom_path, key).as_str(), value)
.await?;
}
for path in &config.extra_rw_paths {
tx.mknod(format!("{}/{}", dom_path, path).as_str(), rw_perm)
.await?;
}
tx.commit().await?;
}
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
self.call.set_max_mem(domid, config.mem_mb * 1024).await?;
let image_loader = ElfImageLoader::load_file_kernel(config.kernel_path)?;
let xenstore_evtchn: u32;
let xenstore_mfn: u64;
let p2m: Vec<u64>;
let mut state: BootState;
{
let mut boot = BootSetup::new(&self.call, domid);
#[cfg(target_arch = "x86_64")]
let mut arch = Box::new(X86BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
#[cfg(target_arch = "aarch64")]
let mut arch = Box::new(Arm64BootSetup::new()) as Box<dyn ArchBootSetup + Send + Sync>;
let initrd = read(config.initrd_path)?;
state = boot
.initialize(
&mut arch,
&image_loader,
initrd.as_slice(),
config.max_vcpus,
config.mem_mb,
1,
)
.await?;
boot.boot(&mut arch, &mut state, config.cmdline).await?;
xenstore_evtchn = state.store_evtchn;
xenstore_mfn = boot.phys.p2m[state.xenstore_segment.pfn as usize];
p2m = boot.phys.p2m;
}
{
let tx = self.store.transaction().await?;
tx.write_string(format!("{}/image/os_type", vm_path).as_str(), "linux")
.await?;
tx.write_string(
format!("{}/image/kernel", vm_path).as_str(),
config.kernel_path,
)
.await?;
tx.write_string(
format!("{}/image/ramdisk", vm_path).as_str(),
config.initrd_path,
)
.await?;
tx.write_string(
format!("{}/image/cmdline", vm_path).as_str(),
config.cmdline,
)
.await?;
tx.write_string(
format!("{}/memory/static-max", dom_path).as_str(),
&(config.mem_mb * 1024).to_string(),
)
.await?;
tx.write_string(
format!("{}/memory/target", dom_path).as_str(),
&(config.mem_mb * 1024).to_string(),
)
.await?;
tx.write_string(format!("{}/memory/videoram", dom_path).as_str(), "0")
.await?;
tx.write_string(format!("{}/domid", dom_path).as_str(), &domid.to_string())
.await?;
tx.write_string(
format!("{}/store/port", dom_path).as_str(),
&xenstore_evtchn.to_string(),
)
.await?;
tx.write_string(
format!("{}/store/ring-ref", dom_path).as_str(),
&xenstore_mfn.to_string(),
)
.await?;
for i in 0..config.max_vcpus {
let path = format!("{}/cpu/{}", dom_path, i);
tx.mkdir(&path).await?;
tx.set_perms(&path, ro_perm).await?;
let path = format!("{}/cpu/{}/availability", dom_path, i);
tx.write_string(&path, "online").await?;
tx.set_perms(&path, ro_perm).await?;
}
tx.commit().await?;
}
transaction.commit().await?;
if !self
.store
.introduce_domain(domid, xenstore_mfn, xenstore_evtchn)
.introduce_domain(domid, created.store_mfn, created.store_evtchn)
.await?
{
return Err(Error::IntroduceDomainFailed);
}
self.console_device_add(
&DomainChannel {
typ: config
.use_console_backend
.unwrap_or("xenconsoled")
.to_string(),
initialized: true,
},
&p2m,
&state,
&dom_path,
&backend_dom_path,
config.backend_domid,
domid,
0,
)
.await?;
let transaction = self.transaction(domid, config.backend_domid).await?;
transaction
.add_channel_device(
created,
0,
&DomainChannel {
typ: config
.swap_console_backend
.clone()
.unwrap_or("xenconsoled".to_string())
.to_string(),
initialized: true,
},
)
.await?;
let mut channels: Vec<CreatedChannel> = Vec::new();
for (index, channel) in config.channels.iter().enumerate() {
let (Some(ring_ref), Some(evtchn)) = self
.console_device_add(
channel,
&p2m,
&state,
&dom_path,
&backend_dom_path,
config.backend_domid,
domid,
index + 1,
)
.await?
else {
continue;
};
channels.push(CreatedChannel { ring_ref, evtchn });
transaction
.add_channel_device(created, index + 1, channel)
.await?;
}
for (index, disk) in config.disks.iter().enumerate() {
self.disk_device_add(
&dom_path,
&backend_dom_path,
config.backend_domid,
domid,
index,
disk,
)
.await?;
transaction.add_vbd_device(index, disk).await?;
}
for (index, filesystem) in config.filesystems.iter().enumerate() {
self.fs_9p_device_add(
&dom_path,
&backend_dom_path,
config.backend_domid,
domid,
index,
filesystem,
)
.await?;
transaction.add_9pfs_device(index, filesystem).await?;
}
for (index, vif) in config.vifs.iter().enumerate() {
self.vif_device_add(
&dom_path,
&backend_dom_path,
config.backend_domid,
domid,
index,
vif,
)
.await?;
transaction.add_vif_device(index, vif).await?;
}
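// Allocate an unbound event channel for each requested name and publish it under <dom_path>/evtchn/<name>.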
for channel in &config.event_channels {
let id = self
.call
.evtchn_alloc_unbound(domid, config.backend_domid)
.await?;
let channel_path = format!("{}/evtchn/{}", dom_path, channel.name);
self.store
.write_string(&format!("{}/name", channel_path), channel.name)
.await?;
self.store
.write_string(&format!("{}/channel", channel_path), &id.to_string())
for (index, pci) in config.pcis.iter().enumerate() {
transaction
.add_pci_device(&self.call, index, config.pcis.len(), pci)
.await?;
}
for (key, value) in &config.extra_keys {
transaction.write_key(key, value).await?;
}
for key in &config.extra_rw_paths {
transaction.add_rw_path(key).await?;
}
transaction.commit().await?;
self.call.unpause_domain(domid).await?;
Ok(CreatedDomain { domid, channels })
}
async fn disk_device_add(
&self,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
domid: u32,
index: usize,
disk: &DomainDisk<'_>,
) -> Result<()> {
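// Virtual block devices follow the xvd numbering scheme: major 202 in the upper bits,
// with the disk index shifted into the minor's partition field (16 minors per disk).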
let id = (202 << 8) | (index << 4) as u64;
let backend_items: Vec<(&str, String)> = vec![
("frontend-id", domid.to_string()),
("online", "1".to_string()),
("removable", "0".to_string()),
("bootable", "1".to_string()),
("state", "1".to_string()),
("dev", disk.vdev.to_string()),
("type", "phy".to_string()),
("mode", if disk.writable { "w" } else { "r" }.to_string()),
("device-type", "disk".to_string()),
("discard-enable", "0".to_string()),
("specification", "xen".to_string()),
("physical-device-path", disk.block.path.to_string()),
(
"physical-device",
format!("{:02x}:{:02x}", disk.block.major, disk.block.minor),
),
];
let frontend_items: Vec<(&str, String)> = vec![
("backend-id", backend_domid.to_string()),
("state", "1".to_string()),
("virtual-device", id.to_string()),
("device-type", "disk".to_string()),
("trusted", "1".to_string()),
("protocol", "x86_64-abi".to_string()),
];
self.device_add(
"vbd",
id,
dom_path,
backend_dom_path,
backend_domid,
domid,
frontend_items,
backend_items,
)
.await?;
Ok(())
}
#[allow(clippy::too_many_arguments, clippy::unnecessary_unwrap)]
async fn console_device_add(
&self,
channel: &DomainChannel,
p2m: &[u64],
state: &BootState,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
domid: u32,
index: usize,
) -> Result<(Option<u64>, Option<u32>)> {
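// A console at this index may not exist in the boot state; the port and ring-ref are
// only published when the boot setup actually allocated one.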
let console = state.consoles.get(index);
let port = console.map(|x| x.0);
let ring = console.map(|x| p2m[x.1.pfn as usize]);
let mut backend_entries = vec![
("frontend-id", domid.to_string()),
("online", "1".to_string()),
("protocol", "vt100".to_string()),
];
let mut frontend_entries = vec![
("backend-id", backend_domid.to_string()),
("limit", "1048576".to_string()),
("output", "pty".to_string()),
("tty", "".to_string()),
];
frontend_entries.push(("type", channel.typ.clone()));
backend_entries.push(("type", channel.typ.clone()));
if port.is_some() && ring.is_some() {
if channel.typ != "xenconsoled" {
frontend_entries.push(("state", "1".to_string()));
}
frontend_entries.extend_from_slice(&[
("port", port.unwrap().to_string()),
("ring-ref", ring.unwrap().to_string()),
]);
} else {
frontend_entries.extend_from_slice(&[
("state", "1".to_string()),
("protocol", "vt100".to_string()),
]);
}
if channel.initialized {
backend_entries.push(("state", "4".to_string()));
} else {
backend_entries.push(("state", "1".to_string()));
}
self.device_add(
"console",
index as u64,
dom_path,
backend_dom_path,
backend_domid,
domid,
frontend_entries,
backend_entries,
)
.await?;
Ok((ring, port))
}
async fn fs_9p_device_add(
&self,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
domid: u32,
index: usize,
filesystem: &DomainFilesystem<'_>,
) -> Result<()> {
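// 9pfs device ids start at 90; the backend advertises the host path with security-model "none".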
let id = 90 + index as u64;
let backend_items: Vec<(&str, String)> = vec![
("frontend-id", domid.to_string()),
("online", "1".to_string()),
("state", "1".to_string()),
("path", filesystem.path.to_string()),
("security-model", "none".to_string()),
];
let frontend_items: Vec<(&str, String)> = vec![
("backend-id", backend_domid.to_string()),
("state", "1".to_string()),
("tag", filesystem.tag.to_string()),
];
self.device_add(
"9pfs",
id,
dom_path,
backend_dom_path,
backend_domid,
domid,
frontend_items,
backend_items,
)
.await?;
Ok(())
}
async fn vif_device_add(
&self,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
domid: u32,
index: usize,
vif: &DomainNetworkInterface<'_>,
) -> Result<()> {
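// Network interfaces get ids starting at 20; the same id is reused as the vif handle.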
let id = 20 + index as u64;
let mut backend_items: Vec<(&str, String)> = vec![
("frontend-id", domid.to_string()),
("online", "1".to_string()),
("state", "1".to_string()),
("mac", vif.mac.to_string()),
("mtu", vif.mtu.to_string()),
("type", "vif".to_string()),
("handle", id.to_string()),
];
if vif.bridge.is_some() {
backend_items.extend_from_slice(&[("bridge", vif.bridge.unwrap().to_string())]);
}
if vif.script.is_some() {
backend_items.extend_from_slice(&[
("script", vif.script.unwrap().to_string()),
("hotplug-status", "".to_string()),
]);
} else {
backend_items.extend_from_slice(&[
("script", "".to_string()),
("hotplug-status", "connected".to_string()),
]);
}
let frontend_items: Vec<(&str, String)> = vec![
("backend-id", backend_domid.to_string()),
("state", "1".to_string()),
("mac", vif.mac.to_string()),
("trusted", "1".to_string()),
("mtu", vif.mtu.to_string()),
];
self.device_add(
"vif",
id,
dom_path,
backend_dom_path,
backend_domid,
domid,
frontend_items,
backend_items,
)
.await?;
Ok(())
}
#[allow(clippy::too_many_arguments)]
async fn device_add(
&self,
typ: &str,
id: u64,
dom_path: &str,
backend_dom_path: &str,
backend_domid: u32,
domid: u32,
frontend_items: Vec<(&str, String)>,
backend_items: Vec<(&str, String)>,
) -> Result<()> {
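// Console 0 is special-cased: its frontend lives at <dom_path>/console instead of
// under <dom_path>/device/console/0.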
let console_zero = typ == "console" && id == 0;
let frontend_path = if console_zero {
format!("{}/console", dom_path)
} else {
format!("{}/device/{}/{}", dom_path, typ, id)
};
let backend_path = format!("{}/backend/{}/{}/{}", backend_dom_path, typ, domid, id);
let mut backend_items: Vec<(&str, String)> = backend_items.clone();
let mut frontend_items: Vec<(&str, String)> = frontend_items.clone();
backend_items.push(("frontend", frontend_path.clone()));
frontend_items.push(("backend", backend_path.clone()));
let frontend_perms = &[
XsPermission {
id: domid,
perms: XS_PERM_NONE,
},
XsPermission {
id: backend_domid,
perms: XS_PERM_READ,
},
];
let backend_perms = &[
XsPermission {
id: backend_domid,
perms: XS_PERM_NONE,
},
XsPermission {
id: domid,
perms: XS_PERM_READ,
},
];
let tx = self.store.transaction().await?;
tx.mknod(&frontend_path, frontend_perms).await?;
for (p, value) in &frontend_items {
let path = format!("{}/{}", frontend_path, *p);
tx.write_string(&path, value).await?;
if !console_zero {
tx.set_perms(&path, frontend_perms).await?;
}
}
tx.mknod(&backend_path, backend_perms).await?;
for (p, value) in &backend_items {
let path = format!("{}/{}", backend_path, *p);
tx.write_string(&path, value).await?;
}
tx.commit().await?;
Ok(())
}
pub async fn destroy(&self, domid: u32) -> Result<()> {
if let Err(err) = self.destroy_store(domid).await {
warn!("failed to destroy store for domain {}: {}", domid, err);
}
self.call.destroy_domain(domid).await?;
self.domain_manager.destroy(domid).await?;
Ok(())
}
@@ -820,21 +306,4 @@ impl XenClient {
tx.commit().await?;
Ok(())
}
pub async fn get_console_path(&self, domid: u32) -> Result<String> {
let dom_path = self.store.get_domain_path(domid).await?;
let console_tty_path = format!("{}/console/tty", dom_path);
let mut tty: Option<String> = None;
for _ in 0..5 {
tty = self.store.read_string(&console_tty_path).await?;
if tty.is_some() {
break;
}
tokio::time::sleep(Duration::from_millis(200)).await;
}
let Some(tty) = tty else {
return Err(Error::TtyNotFound);
};
Ok(tty)
}
}
