Compare commits

...

50 Commits

Author SHA1 Message Date
3cb0e214e9 chore: release (#362)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-08-29 01:50:12 +00:00
0e0c5264eb build(deps): bump the dep-updates group with 2 updates (#367)
Bumps the dep-updates group with 2 updates: [tonic-build](https://github.com/hyperium/tonic) and [tonic](https://github.com/hyperium/tonic).


Updates `tonic-build` from 0.12.1 to 0.12.2
- [Release notes](https://github.com/hyperium/tonic/releases)
- [Changelog](https://github.com/hyperium/tonic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/tonic/compare/v0.12.1...v0.12.2)

Updates `tonic` from 0.12.1 to 0.12.2
- [Release notes](https://github.com/hyperium/tonic/releases)
- [Changelog](https://github.com/hyperium/tonic/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/tonic/compare/v0.12.1...v0.12.2)

---
updated-dependencies:
- dependency-name: tonic-build
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: tonic
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-27 19:23:26 +00:00
19f35ef20a feature(krata): implement network reservation list (#366) 2024-08-26 19:05:57 +00:00
79e27256e6 build(deps): bump the dep-updates group with 6 updates (#365)
Bumps the dep-updates group with 6 updates:

| Package | From | To |
| --- | --- | --- |
| [flate2](https://github.com/rust-lang/flate2-rs) | `1.0.32` | `1.0.33` |
| [ratatui](https://github.com/ratatui/ratatui) | `0.28.0` | `0.28.1` |
| [redb](https://github.com/cberner/redb) | `2.1.1` | `2.1.2` |
| [serde_json](https://github.com/serde-rs/json) | `1.0.125` | `1.0.127` |
| [sysinfo](https://github.com/GuillaumeGomez/sysinfo) | `0.31.2` | `0.31.3` |
| [serde](https://github.com/serde-rs/serde) | `1.0.208` | `1.0.209` |


Updates `flate2` from 1.0.32 to 1.0.33
- [Release notes](https://github.com/rust-lang/flate2-rs/releases)
- [Changelog](https://github.com/rust-lang/flate2-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/flate2-rs/compare/1.0.32...1.0.33)

Updates `ratatui` from 0.28.0 to 0.28.1
- [Release notes](https://github.com/ratatui/ratatui/releases)
- [Changelog](https://github.com/ratatui/ratatui/blob/main/CHANGELOG.md)
- [Commits](https://github.com/ratatui/ratatui/compare/v0.28.0...v0.28.1)

Updates `redb` from 2.1.1 to 2.1.2
- [Release notes](https://github.com/cberner/redb/releases)
- [Changelog](https://github.com/cberner/redb/blob/master/CHANGELOG.md)
- [Commits](https://github.com/cberner/redb/compare/v2.1.1...v2.1.2)

Updates `serde_json` from 1.0.125 to 1.0.127
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/1.0.125...1.0.127)

Updates `sysinfo` from 0.31.2 to 0.31.3
- [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GuillaumeGomez/sysinfo/compare/v0.31.2...v0.31.3)

Updates `serde` from 1.0.208 to 1.0.209
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.208...v1.0.209)

---
updated-dependencies:
- dependency-name: flate2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: ratatui
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: redb
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: sysinfo
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Alex Zenla <alex@edera.dev>
2024-08-26 08:27:30 +00:00
b6c726e7aa feature(kernel): switch to linux-kernel-oci images (#364) 2024-08-26 06:18:56 +00:00
0d2b7a3ae3 feature(zone-exec): implement terminal resize support (#363) 2024-08-26 04:43:07 +00:00
f1e3d59b6a chore: release (#354)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-08-26 01:38:30 +00:00
0106b85de9 fix(zone-exec): catch panic errors and show all errors immediately (#359) 2024-08-25 07:16:20 +00:00
96ccbd50bb fix(zone-exec): ensure that the underlying process is killed when rpc is closed (#361) 2024-08-25 07:07:37 +00:00
41aa1aa707 fix(rpc): rename HostStatus to GetHostStatus (#360) 2024-08-25 06:24:46 +00:00
ec74bc8d2b fix(console): don't replay history when attaching to the console (#358) 2024-08-25 03:49:33 +00:00
694de5d1fd chore(control): split out all of the rpc calls into their own files (#357) 2024-08-25 03:03:20 +00:00
f2db826ba6 feature(config): write default config to config.toml on startup (#356) 2024-08-25 00:48:38 +00:00
7f5609a846 feature(ctl): add --format option to host status and improve cpu topology format (#355) 2024-08-23 19:26:23 +00:00
adb7b29354 chore: release (#341)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-08-22 23:43:03 +00:00
bd448ee8d9 fix(network): allocate host ip from allocation pool (#353) 2024-08-22 22:52:38 +00:00
1647a07226 fix(daemon): turn off trace logging (#352) 2024-08-21 22:04:15 +00:00
151b43eeec feature(zone): kernel command line control on launch (#351) 2024-08-21 20:51:09 +00:00
1123a1a50a build(deps): bump the dep-updates group across 1 directory with 3 updates (#350)
Bumps the dep-updates group with 3 updates in the / directory: [flate2](https://github.com/rust-lang/flate2-rs), [libc](https://github.com/rust-lang/libc) and [reqwest](https://github.com/seanmonstar/reqwest).


Updates `flate2` from 1.0.31 to 1.0.32
- [Release notes](https://github.com/rust-lang/flate2-rs/releases)
- [Changelog](https://github.com/rust-lang/flate2-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/flate2-rs/compare/1.0.31...1.0.32)

Updates `libc` from 0.2.157 to 0.2.158
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.158/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.157...0.2.158)

Updates `reqwest` from 0.12.5 to 0.12.7
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.12.5...v0.12.7)

---
updated-dependencies:
- dependency-name: flate2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: reqwest
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-21 06:16:57 +00:00
6a6b5b6e0b feature(xen-preflight): test for hypervisor presence explicitly and error if missing (#347)
Fixes #309
2024-08-20 00:22:28 +00:00
274136825a build(deps): bump MarcoIeni/release-plz-action in the dep-updates group (#345)
Bumps the dep-updates group with 1 update: [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action).


Updates `MarcoIeni/release-plz-action` from 0.5.64 to 0.5.65
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](92ae919a6b...e28810957e)

---
updated-dependencies:
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-20 00:01:47 +00:00
2ab2cda937 Add support for reading hypervisor console (#344)
* feature(xencall): add hypervisor SYSCTL_readconsole definitions

* feature(hypervisor-dmesg): xencall: add read_console_ring_raw hypercall wrapper

* feature(hypervisor-dmesg): protobuf: add ReadHypervisorConsoleRing RPC

* feature(hypervisor-dmesg): runtime: add read_hypervisor_console wrapper

* feature(hypervisor-dmesg): daemon: add ReadHypervisorConsoleRing rpc implementation

* feature(hypervisor-dmesg): ctl: add host hypervisor-messages command to get hypervisor messages

* feature(hypervisor-dmesg): cli: rename hypervisor-messages command to hv-console

* feature(hypervisor-dmesg): proto: change ReadHypervisorConsoleRing to ReadHypervisorConsole

* feature(hypervisor-dmesg): fix up kratactl protobuf calls
2024-08-19 23:49:02 +00:00
2519d76479 build(deps): bump the dep-updates group with 3 updates (#346)
Bumps the dep-updates group with 3 updates: [arrayvec](https://github.com/bluss/arrayvec), [libc](https://github.com/rust-lang/libc) and [tokio](https://github.com/tokio-rs/tokio).


Updates `arrayvec` from 0.7.4 to 0.7.6
- [Release notes](https://github.com/bluss/arrayvec/releases)
- [Changelog](https://github.com/bluss/arrayvec/blob/master/CHANGELOG.md)
- [Commits](https://github.com/bluss/arrayvec/compare/0.7.4...0.7.6)

Updates `libc` from 0.2.156 to 0.2.157
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.157/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.156...0.2.157)

Updates `tokio` from 1.39.2 to 1.39.3
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.39.2...tokio-1.39.3)

---
updated-dependencies:
- dependency-name: arrayvec
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-19 23:46:46 +00:00
dbeb8bf43b build(deps): bump the dep-updates group with 3 updates (#343)
Bumps the dep-updates group with 3 updates: [libc](https://github.com/rust-lang/libc), [clap](https://github.com/clap-rs/clap) and [serde](https://github.com/serde-rs/serde).


Updates `libc` from 0.2.155 to 0.2.156
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.156/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.155...0.2.156)

Updates `clap` from 4.5.15 to 4.5.16
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.15...clap_complete-v4.5.16)

Updates `serde` from 1.0.207 to 1.0.208
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.207...v1.0.208)

---
updated-dependencies:
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-16 06:39:43 +00:00
6093627bdd cleanup(ctl): move logic for branching ctl run steps into ControlCommands (#342) 2024-08-16 02:32:30 +00:00
1d75dfb88a chore: release (#334)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-08-15 19:06:56 +00:00
18bf370f74 feature(krata): first pass on cpu hotplug support (#340)
* fix(runtime): adjust memory resources inside a transaction

* feature(krata): first pass on cpu hotplug support
2024-08-15 08:06:04 +00:00
506d2ccf46 build(deps): bump serde_json in the dep-updates group (#339)
Bumps the dep-updates group with 1 update: [serde_json](https://github.com/serde-rs/json).


Updates `serde_json` from 1.0.124 to 1.0.125
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.124...v1.0.125)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-15 06:20:52 +00:00
6096dee2fe chore(document-custom-kernels): Add initial documentation on custom kernels (#337) 2024-08-15 00:10:56 +00:00
bf3b73bf24 feature(exec): implement tty support (fixes #335) (#336) 2024-08-14 19:45:59 +00:00
87530edf70 feature(krata): dynamic resource allocation (closes #298) (#333) 2024-08-14 08:14:49 +00:00
1dca770091 chore: release (#306)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-08-14 06:37:52 +00:00
01a94ad23e feature(krata): prepare for workload rework (#276)
* chore(code): simple code cleanup

* chore(code): additional code cleanup

* feature(krata): rework api and make ip assignment persistent to database

* rework and cleanup

* fix daemon config references
2024-08-14 06:17:47 +00:00
2a107a370f build(deps): bump the dep-updates group with 2 updates (#332)
Bumps the dep-updates group with 2 updates: [ctrlc](https://github.com/Detegr/rust-ctrlc) and [indexmap](https://github.com/indexmap-rs/indexmap).


Updates `ctrlc` from 3.4.4 to 3.4.5
- [Release notes](https://github.com/Detegr/rust-ctrlc/releases)
- [Commits](https://github.com/Detegr/rust-ctrlc/compare/3.4.4...3.4.5)

Updates `indexmap` from 2.3.0 to 2.4.0
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.3.0...2.4.0)

---
updated-dependencies:
- dependency-name: ctrlc
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-14 05:39:00 +00:00
313d3f72a5 build(deps): bump docker/build-push-action in the dep-updates group (#331)
Bumps the dep-updates group with 1 update: [docker/build-push-action](https://github.com/docker/build-push-action).


Updates `docker/build-push-action` from 6.6.1 to 6.7.0
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](16ebe778df...5cd11c3a4c)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-14 05:23:20 +00:00
5ec3d9d5c1 xencall: use correct op for setting cpufreq governor (#327) 2024-08-14 03:11:08 +00:00
1cf03a460e fix(idm): reimplement packet processing algorithm (#330)
* chore(xen): rewrite event channel code

* fix(idm): repair idm bugs on the file backend
2024-08-13 23:18:27 +00:00
ffc9dcc0ea build(deps): bump serde from 1.0.206 to 1.0.207 in the dep-updates group (#324)
Bumps the dep-updates group with 1 update: [serde](https://github.com/serde-rs/serde).


Updates `serde` from 1.0.206 to 1.0.207
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.206...v1.0.207)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-13 16:58:37 +00:00
0358c9c775 fix(power-trap-eacces): gracefully handle hypercall errors in power management (#325)
* daemon: reenable built-in power management policy

* xenruntime: gracefully handle power management errors
2024-08-13 08:22:05 +00:00
dcffaf110e build(deps): bump the dep-updates group with 2 updates (#316)
Bumps the dep-updates group with 2 updates: [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) and [docker/build-push-action](https://github.com/docker/build-push-action).


Updates `sigstore/cosign-installer` from 3.5.0 to 3.6.0
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](59acb6260d...4959ce089c)

Updates `docker/build-push-action` from 6.5.0 to 6.6.1
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](5176d81f87...16ebe778df)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-12 22:10:01 +00:00
b81ae5d01a build(deps): bump rust in /images in the dep-updates group (#321)
Bumps the dep-updates group in /images with 1 update: rust.


Updates `rust` from `596c7fa` to `1f5aff5`

---
updated-dependencies:
- dependency-name: rust
  dependency-type: direct:production
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-12 22:08:50 +00:00
1756bc6647 build(deps): bump the dep-updates group across 1 directory with 3 updates (#323)
Bumps the dep-updates group with 3 updates in the / directory: [serde_json](https://github.com/serde-rs/json), [clap](https://github.com/clap-rs/clap) and [serde](https://github.com/serde-rs/serde).


Updates `serde_json` from 1.0.122 to 1.0.124
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.122...v1.0.124)

Updates `clap` from 4.5.13 to 4.5.15
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.13...v4.5.15)

Updates `serde` from 1.0.205 to 1.0.206
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.205...v1.0.206)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-12 22:08:32 +00:00
6bf3741ec9 chore(o11y): add more debug logs to daemon & runtime (#318)
This change adds debug log lines to make it easier to tell where issues
are occurring during startup.
2024-08-08 19:11:51 +00:00
b7d41ee9f4 build(deps): bump serde from 1.0.204 to 1.0.205 in the dep-updates group (#315)
Bumps the dep-updates group with 1 update: [serde](https://github.com/serde-rs/serde).


Updates `serde` from 1.0.204 to 1.0.205
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.204...v1.0.205)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-08 05:52:22 +00:00
53059e8cca fix(xenbus): avoid fd close race by forgetting copy of handle (#314)
This change addresses a race condition where the read-copy of a raw FD
is closed while the write-copy is still in use. Now the read-copy is not
closed, by utilizing mem::forget.

Co-authored-by: Alex Zenla <alex@edera.dev>
2024-08-07 23:55:04 +00:00
11bb99b1e4 build(deps): bump actions/upload-artifact in the dep-updates group (#312)
Bumps the dep-updates group with 1 update: [actions/upload-artifact](https://github.com/actions/upload-artifact).


Updates `actions/upload-artifact` from 4.3.5 to 4.3.6
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](89ef406dd8...834a144ee9)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-07 19:25:49 +00:00
eaa84089ce build(deps): bump hyper-util in the dep-updates group (#311)
Bumps the dep-updates group with 1 update: [hyper-util](https://github.com/hyperium/hyper-util).


Updates `hyper-util` from 0.1.6 to 0.1.7
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.6...v0.1.7)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-07 05:17:50 +00:00
680244fc5e build(deps): bump step-security/harden-runner in the dep-updates group (#308)
Bumps the dep-updates group with 1 update: [step-security/harden-runner](https://github.com/step-security/harden-runner).


Updates `step-security/harden-runner` from 2.9.0 to 2.9.1
- [Release notes](https://github.com/step-security/harden-runner/releases)
- [Commits](0d381219dd...5c7944e73c)

---
updated-dependencies:
- dependency-name: step-security/harden-runner
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-06 16:56:49 +00:00
d469da4d9b chore: release (#303)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-08-06 01:57:25 +00:00
99091df3cf fix(zone): waitpid should be limited when no child processes exist (fixes #304) (#305) 2024-08-05 18:48:30 -07:00
111 changed files with 4595 additions and 2171 deletions

View File

@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -26,15 +26,14 @@ jobs:
rustup component add rustfmt
- name: install linux dependencies
run: ./hack/ci/install-linux-deps.sh
# Temporarily ignored: https://github.com/edera-dev/krata/issues/206
- name: cargo fmt
run: ./hack/build/cargo.sh fmt --all -- --check || true
run: ./hack/build/cargo.sh fmt --all -- --check
shellcheck:
name: shellcheck
runs-on: ubuntu-latest
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -56,7 +55,7 @@ jobs:
name: full build linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -84,7 +83,7 @@ jobs:
name: full test linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -111,7 +110,7 @@ jobs:
name: full clippy linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -139,7 +138,7 @@ jobs:
name: zone initrd linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -176,7 +175,7 @@ jobs:
shell: bash
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: configure git line endings

View File

@ -20,7 +20,7 @@ jobs:
name: nightly full build linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -37,7 +37,7 @@ jobs:
- name: build systemd bundle
run: ./hack/dist/bundle.sh
- name: upload systemd bundle
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: krata-bundle-systemd-${{ matrix.arch }}
path: "target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
@ -45,7 +45,7 @@ jobs:
- name: build deb package
run: ./hack/dist/deb.sh
- name: upload deb package
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: krata-debian-${{ matrix.arch }}
path: "target/dist/*.deb"
@ -53,7 +53,7 @@ jobs:
- name: build apk package
run: ./hack/dist/apk.sh
- name: upload apk package
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: krata-alpine-${{ matrix.arch }}
path: "target/dist/*_${{ matrix.arch }}.apk"
@ -79,7 +79,7 @@ jobs:
shell: bash
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: configure git line endings
@ -104,13 +104,13 @@ jobs:
- name: cargo build kratactl
run: ./hack/build/cargo.sh build --release --bin kratactl
- name: upload kratactl
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl"
if: ${{ matrix.platform.os != 'windows' }}
- name: upload kratactl
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl.exe"
@ -132,7 +132,7 @@ jobs:
packages: write
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -140,7 +140,7 @@ jobs:
with:
submodules: recursive
- name: install cosign
uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0
- name: setup docker buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- name: login to container registry
@ -150,7 +150,7 @@ jobs:
username: "${{ github.actor }}"
password: "${{ secrets.GITHUB_TOKEN }}"
- name: docker build and push ${{ matrix.component }}
uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
id: push
with:
file: ./images/Dockerfile.${{ matrix.component }}

View File

@ -27,7 +27,7 @@ jobs:
contents: write
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -81,7 +81,7 @@ jobs:
contents: write
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -129,7 +129,7 @@ jobs:
packages: write
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
@ -137,7 +137,7 @@ jobs:
with:
submodules: recursive
- name: install cosign
uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0
- name: setup docker buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- name: login to container registry
@ -151,7 +151,7 @@ jobs:
run: |
echo "KRATA_VERSION=$(./hack/dist/version.sh)" >> "${GITHUB_OUTPUT}"
- name: docker build and push ${{ matrix.component }}
uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
id: push
with:
file: ./images/Dockerfile.${{ matrix.component }}

View File

@ -15,7 +15,7 @@ jobs:
contents: write
steps:
- name: harden runner
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: generate cultivator token
@ -37,7 +37,7 @@ jobs:
- name: install linux dependencies
run: ./hack/ci/install-linux-deps.sh
- name: release-plz
uses: MarcoIeni/release-plz-action@92ae919a6b3e27c0472659e3a7414ff4a00e833f # v0.5.64
uses: MarcoIeni/release-plz-action@e28810957ef1fedfa89b5e9692e750ce45f62a67 # v0.5.65
env:
GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}"
CARGO_REGISTRY_TOKEN: "${{ secrets.KRATA_RELEASE_CARGO_TOKEN }}"

View File

@ -6,6 +6,72 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.0.20](https://github.com/edera-dev/krata/compare/v0.0.19...v0.0.20) - 2024-08-27
### Added
- *(krata)* implement network reservation list ([#366](https://github.com/edera-dev/krata/pull/366))
- *(zone-exec)* implement terminal resize support ([#363](https://github.com/edera-dev/krata/pull/363))
### Other
- update Cargo.toml dependencies
## [0.0.19](https://github.com/edera-dev/krata/compare/v0.0.18...v0.0.19) - 2024-08-25
### Added
- *(config)* write default config to config.toml on startup ([#356](https://github.com/edera-dev/krata/pull/356))
- *(ctl)* add --format option to host status and improve cpu topology format ([#355](https://github.com/edera-dev/krata/pull/355))
### Fixed
- *(zone-exec)* ensure that the underlying process is killed when rpc is closed ([#361](https://github.com/edera-dev/krata/pull/361))
- *(rpc)* rename HostStatus to GetHostStatus ([#360](https://github.com/edera-dev/krata/pull/360))
- *(console)* don't replay history when attaching to the console ([#358](https://github.com/edera-dev/krata/pull/358))
- *(zone-exec)* catch panic errors and show all errors immediately ([#359](https://github.com/edera-dev/krata/pull/359))
### Other
- *(control)* split out all of the rpc calls into their own files ([#357](https://github.com/edera-dev/krata/pull/357))
## [0.0.18](https://github.com/edera-dev/krata/compare/v0.0.17...v0.0.18) - 2024-08-22
### Added
- *(zone)* kernel command line control on launch ([#351](https://github.com/edera-dev/krata/pull/351))
- *(xen-preflight)* test for hypervisor presence explicitly and error if missing ([#347](https://github.com/edera-dev/krata/pull/347))
### Fixed
- *(network)* allocate host ip from allocation pool ([#353](https://github.com/edera-dev/krata/pull/353))
- *(daemon)* turn off trace logging ([#352](https://github.com/edera-dev/krata/pull/352))
### Other
- Add support for reading hypervisor console ([#344](https://github.com/edera-dev/krata/pull/344))
- *(ctl)* move logic for branching ctl run steps into ControlCommands ([#342](https://github.com/edera-dev/krata/pull/342))
- update Cargo.toml dependencies
## [0.0.17](https://github.com/edera-dev/krata/compare/v0.0.16...v0.0.17) - 2024-08-15
### Added
- *(krata)* first pass on cpu hotplug support ([#340](https://github.com/edera-dev/krata/pull/340))
- *(exec)* implement tty support (fixes [#335](https://github.com/edera-dev/krata/pull/335)) ([#336](https://github.com/edera-dev/krata/pull/336))
- *(krata)* dynamic resource allocation (closes [#298](https://github.com/edera-dev/krata/pull/298)) ([#333](https://github.com/edera-dev/krata/pull/333))
### Other
- update Cargo.toml dependencies
## [0.0.16](https://github.com/edera-dev/krata/compare/v0.0.15...v0.0.16) - 2024-08-14
### Added
- *(krata)* prepare for workload rework ([#276](https://github.com/edera-dev/krata/pull/276))
### Fixed
- *(idm)* reimplement packet processing algorithm ([#330](https://github.com/edera-dev/krata/pull/330))
- *(power-trap-eacces)* gracefully handle hypercall errors in power management ([#325](https://github.com/edera-dev/krata/pull/325))
### Other
- *(o11y)* add more debug logs to daemon & runtime ([#318](https://github.com/edera-dev/krata/pull/318))
## [0.0.15](https://github.com/edera-dev/krata/compare/v0.0.14...v0.0.15) - 2024-08-06
### Fixed
- *(zone)* waitpid should be limited when no child processes exist (fixes [#304](https://github.com/edera-dev/krata/pull/304)) ([#305](https://github.com/edera-dev/krata/pull/305))
## [0.0.14](https://github.com/edera-dev/krata/compare/v0.0.13...v0.0.14) - 2024-08-06
### Added

1029
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -18,14 +18,14 @@ members = [
resolver = "2"
[workspace.package]
version = "0.0.14"
version = "0.0.20"
homepage = "https://krata.dev"
license = "Apache-2.0"
repository = "https://github.com/edera-dev/krata"
[workspace.dependencies]
anyhow = "1.0"
arrayvec = "0.7.4"
arrayvec = "0.7.6"
async-compression = "0.4.12"
async-stream = "0.3.5"
async-trait = "0.1.81"
@ -37,8 +37,8 @@ c2rust-bitfields = "0.18.0"
cgroups-rs = "0.3.4"
circular-buffer = "0.1.7"
comfy-table = "7.1.1"
crossterm = "0.27.0"
ctrlc = "3.4.4"
crossterm = "0.28.1"
ctrlc = "3.4.5"
elf = "0.7.4"
env_logger = "0.11.5"
etherparse = "0.15.0"
@ -46,9 +46,9 @@ fancy-duration = "0.9.2"
flate2 = "1.0"
futures = "0.3.30"
hyper = "1.4.1"
hyper-util = "0.1.6"
hyper-util = "0.1.7"
human_bytes = "0.4"
indexmap = "2.3.0"
indexmap = "2.4.0"
indicatif = "0.17.8"
ipnetwork = "0.20.0"
libc = "0.2"
@ -68,32 +68,34 @@ prost = "0.13.1"
prost-build = "0.13.1"
prost-reflect-build = "0.14.0"
prost-types = "0.13.1"
pty-process = "0.4.0"
rand = "0.8.5"
ratatui = "0.27.0"
redb = "2.1.1"
ratatui = "0.28.1"
redb = "2.1.2"
regex = "1.10.6"
rtnetlink = "0.14.1"
scopeguard = "1.2.0"
serde_json = "1.0.122"
serde_json = "1.0.127"
serde_yaml = "0.9"
sha256 = "1.5.0"
signal-hook = "0.3.17"
slice-copy = "0.3.0"
smoltcp = "0.11.0"
sysinfo = "0.30.13"
sysinfo = "0.31.3"
termtree = "0.5.1"
thiserror = "1.0"
tokio-tun = "0.11.5"
tokio-util = "0.7.11"
toml = "0.8.19"
tonic-build = "0.12.1"
tower = "0.4.13"
tonic-build = "0.12.2"
tower = "0.5.0"
udp-stream = "0.0.12"
url = "2.5.2"
walkdir = "2"
xz2 = "0.1"
[workspace.dependencies.clap]
version = "4.5.13"
version = "4.5.16"
features = ["derive"]
[workspace.dependencies.prost-reflect]
@ -101,12 +103,12 @@ version = "0.14.0"
features = ["derive"]
[workspace.dependencies.reqwest]
version = "0.12.5"
version = "0.12.7"
default-features = false
features = ["rustls-tls"]
[workspace.dependencies.serde]
version = "1.0.204"
version = "1.0.209"
features = ["derive"]
[workspace.dependencies.sys-mount]
@ -114,7 +116,7 @@ version = "3.0.0"
default-features = false
[workspace.dependencies.tokio]
version = "1.39.2"
version = "1.39.3"
features = ["full"]
[workspace.dependencies.tokio-stream]
@ -122,7 +124,7 @@ version = "0.1"
features = ["io-util", "net"]
[workspace.dependencies.tonic]
version = "0.12.1"
version = "0.12.2"
features = ["tls"]
[workspace.dependencies.uuid]

View File

@ -16,7 +16,7 @@ oci-spec = { workspace = true }
scopeguard = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
krata-oci = { path = "../oci", version = "^0.0.14" }
krata-oci = { path = "../oci", version = "^0.0.20" }
krata-tokio-tar = { workspace = true }
uuid = { workspace = true }

View File

@ -20,7 +20,7 @@ env_logger = { workspace = true }
fancy-duration = { workspace = true }
human_bytes = { workspace = true }
indicatif = { workspace = true }
krata = { path = "../krata", version = "^0.0.14" }
krata = { path = "../krata", version = "^0.0.20" }
log = { workspace = true }
prost-reflect = { workspace = true, features = ["serde"] }
prost-types = { workspace = true }

View File

@ -23,7 +23,7 @@ enum DeviceListFormat {
}
#[derive(Parser)]
#[command(about = "List the devices on the isolation engine")]
#[command(about = "List device information")]
pub struct DeviceListCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: DeviceListFormat,

View File

@ -3,9 +3,11 @@ use clap::{Parser, ValueEnum};
use comfy_table::presets::UTF8_FULL_CONDENSED;
use comfy_table::{Cell, Table};
use krata::v1::control::{
control_service_client::ControlServiceClient, HostCpuTopologyClass, HostCpuTopologyRequest,
control_service_client::ControlServiceClient, GetHostCpuTopologyRequest, HostCpuTopologyClass,
};
use serde_json::Value;
use crate::format::{kv2line, proto2dynamic, proto2kv};
use tonic::{transport::Channel, Request};
fn class_to_str(input: HostCpuTopologyClass) -> String {
@ -19,6 +21,11 @@ fn class_to_str(input: HostCpuTopologyClass) -> String {
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum HostCpuTopologyFormat {
Table,
Json,
JsonPretty,
Jsonl,
Yaml,
KeyValue,
}
#[derive(Parser)]
@ -31,10 +38,12 @@ pub struct HostCpuTopologyCommand {
impl HostCpuTopologyCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.get_host_cpu_topology(Request::new(HostCpuTopologyRequest {}))
.get_host_cpu_topology(Request::new(GetHostCpuTopologyRequest {}))
.await?
.into_inner();
match self.format {
HostCpuTopologyFormat::Table => {
let mut table = Table::new();
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
@ -54,6 +63,41 @@ impl HostCpuTopologyCommand {
if !table.is_empty() {
println!("{}", table);
}
}
HostCpuTopologyFormat::Json
| HostCpuTopologyFormat::JsonPretty
| HostCpuTopologyFormat::Yaml => {
let mut values = Vec::new();
for cpu in response.cpus {
let message = proto2dynamic(cpu)?;
values.push(serde_json::to_value(message)?);
}
let value = Value::Array(values);
let encoded = if self.format == HostCpuTopologyFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == HostCpuTopologyFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
HostCpuTopologyFormat::Jsonl => {
for cpu in response.cpus {
let message = proto2dynamic(cpu)?;
println!("{}", serde_json::to_string(&message)?);
}
}
HostCpuTopologyFormat::KeyValue => {
for cpu in response.cpus {
let kvs = proto2kv(cpu)?;
println!("{}", kv2line(kvs),);
}
}
}
Ok(())
}

View File

@ -0,0 +1,23 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::control::{
control_service_client::ControlServiceClient, ReadHypervisorConsoleRequest,
};
use tonic::{transport::Channel, Request};
/// `host hv-console`: dump the hypervisor console buffer to stdout.
#[derive(Parser)]
#[command(about = "Display hypervisor console output")]
pub struct HostHvConsoleCommand {}

impl HostHvConsoleCommand {
    /// Fetch the hypervisor console contents over the control channel and
    /// print them verbatim (no trailing newline is appended).
    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
        let request = Request::new(ReadHypervisorConsoleRequest {});
        let reply = client.read_hypervisor_console(request).await?.into_inner();
        print!("{}", reply.data);
        Ok(())
    }
}

View File

@ -1,22 +0,0 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::control::{control_service_client::ControlServiceClient, IdentifyHostRequest};
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(about = "Identify information about the host")]
pub struct HostIdentifyCommand {}
impl HostIdentifyCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.identify_host(Request::new(IdentifyHostRequest {}))
.await?
.into_inner();
println!("Host UUID: {}", response.host_uuid);
println!("Host Domain: {}", response.host_domid);
println!("Krata Version: {}", response.krata_version);
Ok(())
}
}

View File

@ -6,12 +6,14 @@ use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
use crate::cli::host::cpu_topology::HostCpuTopologyCommand;
use crate::cli::host::identify::HostIdentifyCommand;
use crate::cli::host::hv_console::HostHvConsoleCommand;
use crate::cli::host::idm_snoop::HostIdmSnoopCommand;
use crate::cli::host::status::HostStatusCommand;
pub mod cpu_topology;
pub mod identify;
pub mod hv_console;
pub mod idm_snoop;
pub mod status;
#[derive(Parser)]
#[command(about = "Manage the host of the isolation engine")]
@ -33,8 +35,9 @@ impl HostCommand {
#[derive(Subcommand)]
pub enum HostCommands {
CpuTopology(HostCpuTopologyCommand),
Identify(HostIdentifyCommand),
Status(HostStatusCommand),
IdmSnoop(HostIdmSnoopCommand),
HvConsole(HostHvConsoleCommand),
}
impl HostCommands {
@ -46,9 +49,11 @@ impl HostCommands {
match self {
HostCommands::CpuTopology(cpu_topology) => cpu_topology.run(client).await,
HostCommands::Identify(identify) => identify.run(client).await,
HostCommands::Status(status) => status.run(client).await,
HostCommands::IdmSnoop(snoop) => snoop.run(client, events).await,
HostCommands::HvConsole(hvconsole) => hvconsole.run(client).await,
}
}
}

View File

@ -0,0 +1,60 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::v1::control::{control_service_client::ControlServiceClient, GetHostStatusRequest};
use crate::format::{kv2line, proto2dynamic, proto2kv};
use tonic::{transport::Channel, Request};
// Output encodings supported by `host status`.
// NOTE: plain `//` comments are used deliberately — `///` doc comments on a
// clap ValueEnum would be picked up as CLI help text.
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum HostStatusFormat {
// Human-readable "Key: value" lines (the default).
Simple,
// Compact single-line JSON.
Json,
// Pretty-printed JSON.
JsonPretty,
// YAML document.
Yaml,
// Flat "key=value" pairs on one line.
KeyValue,
}
// CLI arguments for `host status`.
// NOTE: plain `//` comments used so clap does not treat them as help text.
#[derive(Parser)]
#[command(about = "Get information about the host")]
pub struct HostStatusCommand {
// Rendering format for the status reply; one of the HostStatusFormat variants.
#[arg(short, long, default_value = "simple", help = "Output format")]
format: HostStatusFormat,
}
impl HostStatusCommand {
    /// Query the daemon for host status and render the reply in the
    /// format selected on the command line.
    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
        let status = client
            .get_host_status(Request::new(GetHostStatusRequest {}))
            .await?
            .into_inner();

        match self.format {
            HostStatusFormat::Simple => {
                println!("Host UUID: {}", status.host_uuid);
                println!("Host Domain: {}", status.host_domid);
                println!("Krata Version: {}", status.krata_version);
                println!("Host IPv4: {}", status.host_ipv4);
                println!("Host IPv6: {}", status.host_ipv6);
                println!("Host Ethernet Address: {}", status.host_mac);
            }
            HostStatusFormat::Json | HostStatusFormat::JsonPretty | HostStatusFormat::Yaml => {
                // Convert the protobuf reply into a serde-compatible value,
                // then pick the encoder that matches the requested format.
                let dynamic = proto2dynamic(status)?;
                let value = serde_json::to_value(dynamic)?;
                let encoded = match self.format {
                    HostStatusFormat::JsonPretty => serde_json::to_string_pretty(&value)?,
                    HostStatusFormat::Yaml => serde_yaml::to_string(&value)?,
                    _ => serde_json::to_string(&value)?,
                };
                println!("{}", encoded.trim());
            }
            HostStatusFormat::KeyValue => {
                // One flat line of key=value pairs.
                println!("{}", kv2line(proto2kv(status)?));
            }
        }
        Ok(())
    }
}

View File

@ -1,6 +1,7 @@
pub mod device;
pub mod host;
pub mod image;
pub mod network;
pub mod zone;
use crate::cli::device::DeviceCommand;
@ -12,8 +13,9 @@ use clap::Parser;
use krata::{
client::ControlClientProvider,
events::EventStream,
v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest},
v1::control::{control_service_client::ControlServiceClient, ResolveZoneIdRequest},
};
use network::NetworkCommand;
use tonic::{transport::Channel, Request};
#[derive(Parser)]
@ -31,10 +33,12 @@ pub struct ControlCommand {
command: ControlCommands,
}
#[allow(clippy::large_enum_variant)]
#[derive(Parser)]
pub enum ControlCommands {
Zone(ZoneCommand),
Image(ImageCommand),
Network(NetworkCommand),
Device(DeviceCommand),
Host(HostCommand),
}
@ -43,15 +47,26 @@ impl ControlCommand {
pub async fn run(self) -> Result<()> {
let client = ControlClientProvider::dial(self.connection.parse()?).await?;
let events = EventStream::open(client.clone()).await?;
self.command.run(client, events).await
}
}
match self.command {
impl ControlCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
match self {
ControlCommands::Zone(zone) => zone.run(client, events).await,
ControlCommands::Network(network) => network.run(client, events).await,
ControlCommands::Image(image) => image.run(client, events).await,
ControlCommands::Device(device) => device.run(client, events).await,
ControlCommands::Host(snoop) => snoop.run(client, events).await,
ControlCommands::Host(host) => host.run(client, events).await,
}
}
}
@ -61,14 +76,14 @@ pub async fn resolve_zone(
name: &str,
) -> Result<String> {
let reply = client
.resolve_zone(Request::new(ResolveZoneRequest {
.resolve_zone_id(Request::new(ResolveZoneIdRequest {
name: name.to_string(),
}))
.await?
.into_inner();
if let Some(zone) = reply.zone {
Ok(zone.id)
if !reply.zone_id.is_empty() {
Ok(reply.zone_id)
} else {
Err(anyhow!("unable to resolve zone '{}'", name))
}

View File

@ -0,0 +1,43 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use reservation::NetworkReservationCommand;
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
pub mod reservation;
/// `network` command group: manage networking on the isolation engine.
#[derive(Parser)]
#[command(about = "Manage the network on the isolation engine")]
pub struct NetworkCommand {
    #[command(subcommand)]
    subcommand: NetworkCommands,
}

impl NetworkCommand {
    /// Delegate directly to the selected subcommand.
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        let NetworkCommand { subcommand } = self;
        subcommand.run(client, events).await
    }
}
/// Subcommands available under `network`.
#[derive(Subcommand)]
pub enum NetworkCommands {
    Reservation(NetworkReservationCommand),
}

impl NetworkCommands {
    /// Dispatch to the implementation of the matching subcommand.
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        match self {
            Self::Reservation(cmd) => cmd.run(client, events).await,
        }
    }
}

View File

@ -0,0 +1,125 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Table};
use krata::{
events::EventStream,
v1::{
common::NetworkReservation,
control::{control_service_client::ControlServiceClient, ListNetworkReservationsRequest},
},
};
use serde_json::Value;
use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, proto2kv};
// Output encodings supported by `network reservation list`.
// NOTE: plain `//` comments used so clap does not surface them as help text.
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum NetworkReservationListFormat {
// UTF-8 table (the default).
Table,
// Compact JSON array of all reservations.
Json,
// Pretty-printed JSON array.
JsonPretty,
// One JSON object per line.
Jsonl,
// YAML document.
Yaml,
// One "key=value ..." line per reservation.
KeyValue,
// Tab-separated uuid/ipv4/ipv6/mac, one reservation per line.
Simple,
}
// CLI arguments for `network reservation list`.
// NOTE: plain `//` comments used so clap does not treat them as help text.
#[derive(Parser)]
#[command(about = "List network reservation information")]
pub struct NetworkReservationListCommand {
// Rendering format; one of the NetworkReservationListFormat variants.
#[arg(short, long, default_value = "table", help = "Output format")]
format: NetworkReservationListFormat,
}
impl NetworkReservationListCommand {
    /// Fetch every network reservation from the daemon, sort them by UUID,
    /// and print them in the format selected on the command line.
    pub async fn run(
        self,
        mut client: ControlServiceClient<Channel>,
        _events: EventStream,
    ) -> Result<()> {
        let reply = client
            .list_network_reservations(ListNetworkReservationsRequest {})
            .await?
            .into_inner();
        // Sort by UUID for stable output across invocations.
        let mut reservations = reply.reservations;
        reservations.sort_by(|a, b| a.uuid.cmp(&b.uuid));

        match self.format {
            NetworkReservationListFormat::Table => {
                self.print_reservations_table(reservations)?;
            }
            NetworkReservationListFormat::Simple => {
                // Tab-separated, one reservation per line.
                for reservation in reservations {
                    println!(
                        "{}\t{}\t{}\t{}",
                        reservation.uuid, reservation.ipv4, reservation.ipv6, reservation.mac
                    );
                }
            }
            NetworkReservationListFormat::Json
            | NetworkReservationListFormat::JsonPretty
            | NetworkReservationListFormat::Yaml => {
                // Build a JSON array of all reservations, then pick the encoder.
                let mut items = Vec::new();
                for reservation in reservations {
                    items.push(serde_json::to_value(proto2dynamic(reservation)?)?);
                }
                let value = Value::Array(items);
                let encoded = match self.format {
                    NetworkReservationListFormat::JsonPretty => {
                        serde_json::to_string_pretty(&value)?
                    }
                    NetworkReservationListFormat::Yaml => serde_yaml::to_string(&value)?,
                    _ => serde_json::to_string(&value)?,
                };
                println!("{}", encoded.trim());
            }
            NetworkReservationListFormat::Jsonl => {
                // One JSON object per line.
                for reservation in reservations {
                    println!("{}", serde_json::to_string(&proto2dynamic(reservation)?)?);
                }
            }
            NetworkReservationListFormat::KeyValue => {
                self.print_key_value(reservations)?;
            }
        }
        Ok(())
    }

    // Render the reservations as a condensed UTF-8 table on stdout.
    fn print_reservations_table(&self, reservations: Vec<NetworkReservation>) -> Result<()> {
        let mut table = Table::new();
        table.load_preset(UTF8_FULL_CONDENSED);
        table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
        table.set_header(vec!["uuid", "ipv4", "ipv6", "mac"]);
        for reservation in reservations {
            table.add_row(vec![
                Cell::new(reservation.uuid),
                Cell::new(reservation.ipv4),
                Cell::new(reservation.ipv6),
                Cell::new(reservation.mac),
            ]);
        }
        if table.is_empty() {
            println!("no network reservations found");
        } else {
            println!("{}", table);
        }
        Ok(())
    }

    // Emit one "key=value ..." line per reservation.
    fn print_key_value(&self, reservations: Vec<NetworkReservation>) -> Result<()> {
        for reservation in reservations {
            println!("{}", kv2line(proto2kv(reservation)?));
        }
        Ok(())
    }
}

View File

@ -0,0 +1,43 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use list::NetworkReservationListCommand;
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
pub mod list;
/// `network reservation` command group.
#[derive(Parser)]
#[command(about = "Manage network reservations")]
pub struct NetworkReservationCommand {
    #[command(subcommand)]
    subcommand: NetworkReservationCommands,
}

impl NetworkReservationCommand {
    /// Delegate directly to the selected subcommand.
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        let NetworkReservationCommand { subcommand } = self;
        subcommand.run(client, events).await
    }
}
/// Subcommands available under `network reservation`.
#[derive(Subcommand)]
pub enum NetworkReservationCommands {
    List(NetworkReservationListCommand),
}

impl NetworkReservationCommands {
    /// Dispatch to the implementation of the matching subcommand.
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        match self {
            Self::List(cmd) => cmd.run(client, events).await,
        }
    }
}

View File

@ -23,10 +23,10 @@ impl ZoneAttachCommand {
events: EventStream,
) -> Result<()> {
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
let input = StdioConsoleStream::stdin_stream(zone_id.clone()).await;
let input = StdioConsoleStream::stdin_stream(zone_id.clone(), false).await;
let output = client.attach_zone_console(input).await?.into_inner();
let stdout_handle =
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, true).await });
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
let code = select! {
x = stdout_handle => {

View File

@ -2,20 +2,16 @@ use anyhow::Result;
use clap::Parser;
use krata::{
events::EventStream,
v1::{
common::ZoneStatus,
control::{
control_service_client::ControlServiceClient, watch_events_reply::Event,
DestroyZoneRequest,
},
v1::control::{
control_service_client::ControlServiceClient, watch_events_reply::Event, DestroyZoneRequest,
},
};
use crate::cli::resolve_zone;
use krata::v1::common::ZoneState;
use log::error;
use tonic::{transport::Channel, Request};
use crate::cli::resolve_zone;
#[derive(Parser)]
#[command(about = "Destroy a zone")]
pub struct ZoneDestroyCommand {
@ -61,12 +57,12 @@ async fn wait_zone_destroyed(id: &str, events: EventStream) -> Result<()> {
continue;
}
let Some(state) = zone.state else {
let Some(status) = zone.status else {
continue;
};
if let Some(ref error) = state.error_info {
if state.status() == ZoneStatus::Failed {
if let Some(ref error) = status.error_status {
if status.state() == ZoneState::Failed {
error!("destroy failed: {}", error.message);
std::process::exit(1);
} else {
@ -74,7 +70,7 @@ async fn wait_zone_destroyed(id: &str, events: EventStream) -> Result<()> {
}
}
if state.status() == ZoneStatus::Destroyed {
if status.state() == ZoneState::Destroyed {
std::process::exit(0);
}
}

View File

@ -3,11 +3,13 @@ use std::collections::HashMap;
use anyhow::Result;
use clap::Parser;
use crossterm::tty::IsTty;
use krata::v1::{
common::{ZoneTaskSpec, ZoneTaskSpecEnvVar},
control::{control_service_client::ControlServiceClient, ExecZoneRequest},
common::{TerminalSize, ZoneTaskSpec, ZoneTaskSpecEnvVar},
control::{control_service_client::ControlServiceClient, ExecInsideZoneRequest},
};
use tokio::io::stdin;
use tonic::{transport::Channel, Request};
use crate::console::StdioConsoleStream;
@ -21,6 +23,8 @@ pub struct ZoneExecCommand {
env: Option<Vec<String>>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(short = 't', long, help = "Allocate tty")]
tty: bool,
#[arg(help = "Zone to exec inside, either the name or the uuid")]
zone: String,
#[arg(
@ -34,7 +38,8 @@ pub struct ZoneExecCommand {
impl ZoneExecCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
let initial = ExecZoneRequest {
let should_map_tty = self.tty && stdin().is_tty();
let initial = ExecInsideZoneRequest {
zone_id,
task: Some(ZoneTaskSpec {
environment: env_map(&self.env.unwrap_or_default())
@ -46,15 +51,29 @@ impl ZoneExecCommand {
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
tty: self.tty,
}),
data: vec![],
stdin: vec![],
stdin_closed: false,
terminal_size: if should_map_tty {
let size = crossterm::terminal::size().ok();
size.map(|(columns, rows)| TerminalSize {
rows: rows as u32,
columns: columns as u32,
})
} else {
None
},
};
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
let stream = StdioConsoleStream::input_stream_exec(initial, should_map_tty).await;
let response = client.exec_zone(Request::new(stream)).await?.into_inner();
let response = client
.exec_inside_zone(Request::new(stream))
.await?
.into_inner();
let code = StdioConsoleStream::exec_output(response).await?;
let code = StdioConsoleStream::exec_output(response, should_map_tty).await?;
std::process::exit(code);
}
}

View File

@ -6,8 +6,9 @@ use krata::{
events::EventStream,
v1::{
common::{
zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneOciImageSpec, ZoneSpec,
ZoneSpecDevice, ZoneStatus, ZoneTaskSpec, ZoneTaskSpecEnvVar,
zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneKernelOptionsSpec,
ZoneOciImageSpec, ZoneResourceSpec, ZoneSpec, ZoneSpecDevice, ZoneState, ZoneTaskSpec,
ZoneTaskSpecEnvVar,
},
control::{
control_service_client::ControlServiceClient, watch_events_reply::Event,
@ -38,19 +39,40 @@ pub struct ZoneLaunchCommand {
pull_update: bool,
#[arg(short, long, help = "Name of the zone")]
name: Option<String>,
#[arg(short, long, default_value_t = 1, help = "vCPUs available to the zone")]
cpus: u32,
#[arg(
short,
long,
default_value_t = 512,
help = "Memory available to the zone, in megabytes"
short = 'C',
long = "max-cpus",
default_value_t = 4,
help = "Maximum vCPUs available for the zone"
)]
mem: u64,
max_cpus: u32,
#[arg(
short = 'c',
long = "target-cpus",
default_value_t = 1,
help = "Target vCPUs for the zone to use"
)]
target_cpus: u32,
#[arg(
short = 'M',
long = "max-memory",
default_value_t = 1024,
help = "Maximum memory available to the zone, in megabytes"
)]
max_memory: u64,
#[arg(
short = 'm',
long = "target-memory",
default_value_t = 1024,
help = "Target memory for the zone to use, in megabytes"
)]
target_memory: u64,
#[arg[short = 'D', long = "device", help = "Devices to request for the zone"]]
device: Vec<String>,
#[arg[short, long, help = "Environment variables set in the zone"]]
env: Option<Vec<String>>,
#[arg(short = 't', long, help = "Allocate tty for task")]
tty: bool,
#[arg(
short,
long,
@ -69,6 +91,10 @@ pub struct ZoneLaunchCommand {
initrd: Option<String>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(long, help = "Enable verbose logging on the kernel")]
kernel_verbose: bool,
#[arg(long, help = "Additional kernel cmdline options")]
kernel_cmdline_append: Option<String>,
#[arg(help = "Container image for zone to use")]
oci: String,
#[arg(
@ -120,8 +146,12 @@ impl ZoneLaunchCommand {
image: Some(image),
kernel,
initrd,
vcpus: self.cpus,
mem: self.mem,
initial_resources: Some(ZoneResourceSpec {
max_memory: self.max_memory,
target_memory: self.target_memory,
max_cpus: self.max_cpus,
target_cpus: self.target_cpus,
}),
task: Some(ZoneTaskSpec {
environment: env_map(&self.env.unwrap_or_default())
.iter()
@ -132,6 +162,7 @@ impl ZoneLaunchCommand {
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
tty: self.tty,
}),
annotations: vec![],
devices: self
@ -139,6 +170,10 @@ impl ZoneLaunchCommand {
.iter()
.map(|name| ZoneSpecDevice { name: name.clone() })
.collect(),
kernel_options: Some(ZoneKernelOptionsSpec {
verbose: self.kernel_verbose,
cmdline_append: self.kernel_cmdline_append.clone().unwrap_or_default(),
}),
}),
};
let response = client
@ -152,10 +187,10 @@ impl ZoneLaunchCommand {
}
let code = if self.attach {
let input = StdioConsoleStream::stdin_stream(id.clone()).await;
let input = StdioConsoleStream::stdin_stream(id.clone(), true).await;
let output = client.attach_zone_console(input).await?.into_inner();
let stdout_handle =
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, true).await });
let exit_hook_task = StdioConsoleStream::zone_exit_hook(id.clone(), events).await?;
select! {
x = stdout_handle => {
@ -209,12 +244,12 @@ async fn wait_zone_started(id: &str, events: EventStream) -> Result<()> {
continue;
}
let Some(state) = zone.state else {
let Some(status) = zone.status else {
continue;
};
if let Some(ref error) = state.error_info {
if state.status() == ZoneStatus::Failed {
if let Some(ref error) = status.error_status {
if status.state() == ZoneState::Failed {
error!("launch failed: {}", error.message);
std::process::exit(1);
} else {
@ -222,12 +257,12 @@ async fn wait_zone_started(id: &str, events: EventStream) -> Result<()> {
}
}
if state.status() == ZoneStatus::Destroyed {
if status.state() == ZoneState::Destroyed {
error!("zone destroyed");
std::process::exit(1);
}
if state.status() == ZoneStatus::Started {
if status.state() == ZoneState::Created {
break;
}
}

View File

@ -4,18 +4,19 @@ use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
use krata::{
events::EventStream,
v1::{
common::{Zone, ZoneStatus},
common::Zone,
control::{
control_service_client::ControlServiceClient, ListZonesRequest, ResolveZoneRequest,
control_service_client::ControlServiceClient, ListZonesRequest, ResolveZoneIdRequest,
},
},
};
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line, zone_state_text};
use krata::v1::common::ZoneState;
use krata::v1::control::GetZoneRequest;
use serde_json::Value;
use tonic::{transport::Channel, Request};
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line, zone_status_text};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum ZoneListFormat {
Table,
@ -28,7 +29,7 @@ enum ZoneListFormat {
}
#[derive(Parser)]
#[command(about = "List the zones on the isolation engine")]
#[command(about = "List zone information")]
pub struct ZoneListCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: ZoneListFormat,
@ -44,7 +45,14 @@ impl ZoneListCommand {
) -> Result<()> {
let mut zones = if let Some(ref zone) = self.zone {
let reply = client
.resolve_zone(Request::new(ResolveZoneRequest { name: zone.clone() }))
.resolve_zone_id(Request::new(ResolveZoneIdRequest { name: zone.clone() }))
.await?
.into_inner();
if !reply.zone_id.is_empty() {
let reply = client
.get_zone(Request::new(GetZoneRequest {
zone_id: reply.zone_id,
}))
.await?
.into_inner();
if let Some(zone) = reply.zone {
@ -52,6 +60,9 @@ impl ZoneListCommand {
} else {
return Err(anyhow!("unable to resolve zone '{}'", zone));
}
} else {
return Err(anyhow!("unable to resolve zone '{}'", zone));
}
} else {
client
.list_zones(Request::new(ListZonesRequest {}))
@ -115,30 +126,30 @@ impl ZoneListCommand {
let mut table = Table::new();
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
table.set_header(vec!["name", "uuid", "status", "ipv4", "ipv6"]);
table.set_header(vec!["name", "uuid", "state", "ipv4", "ipv6"]);
for zone in zones {
let ipv4 = zone
.state
.status
.as_ref()
.and_then(|x| x.network.as_ref())
.and_then(|x| x.network_status.as_ref())
.map(|x| x.zone_ipv4.as_str())
.unwrap_or("n/a");
let ipv6 = zone
.state
.status
.as_ref()
.and_then(|x| x.network.as_ref())
.and_then(|x| x.network_status.as_ref())
.map(|x| x.zone_ipv6.as_str())
.unwrap_or("n/a");
let Some(spec) = zone.spec else {
continue;
};
let status = zone.state.as_ref().cloned().unwrap_or_default().status();
let status_text = zone_status_text(status);
let state = zone.status.as_ref().cloned().unwrap_or_default().state();
let status_text = zone_state_text(state);
let status_color = match status {
ZoneStatus::Destroyed | ZoneStatus::Failed => Color::Red,
ZoneStatus::Destroying | ZoneStatus::Exited | ZoneStatus::Starting => Color::Yellow,
ZoneStatus::Started => Color::Green,
let status_color = match state {
ZoneState::Destroyed | ZoneState::Failed => Color::Red,
ZoneState::Destroying | ZoneState::Exited | ZoneState::Creating => Color::Yellow,
ZoneState::Created => Color::Green,
_ => Color::Reset,
};

View File

@ -33,7 +33,7 @@ impl ZoneLogsCommand {
let zone_id_stream = zone_id.clone();
let follow = self.follow;
let input = stream! {
yield ZoneConsoleRequest { zone_id: zone_id_stream, data: Vec::new() };
yield ZoneConsoleRequest { zone_id: zone_id_stream, replay_history: true, data: Vec::new() };
if follow {
let mut pending = pending::<ZoneConsoleRequest>();
while let Some(x) = pending.next().await {
@ -43,7 +43,7 @@ impl ZoneLogsCommand {
};
let output = client.attach_zone_console(input).await?.into_inner();
let stdout_handle =
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, false).await });
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
let code = select! {
x = stdout_handle => {

View File

@ -14,6 +14,7 @@ use crate::cli::zone::logs::ZoneLogsCommand;
use crate::cli::zone::metrics::ZoneMetricsCommand;
use crate::cli::zone::resolve::ZoneResolveCommand;
use crate::cli::zone::top::ZoneTopCommand;
use crate::cli::zone::update_resources::ZoneUpdateResourcesCommand;
use crate::cli::zone::watch::ZoneWatchCommand;
pub mod attach;
@ -25,6 +26,7 @@ pub mod logs;
pub mod metrics;
pub mod resolve;
pub mod top;
pub mod update_resources;
pub mod watch;
#[derive(Parser)]
@ -44,6 +46,7 @@ impl ZoneCommand {
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Subcommand)]
pub enum ZoneCommands {
Attach(ZoneAttachCommand),
@ -56,6 +59,7 @@ pub enum ZoneCommands {
Resolve(ZoneResolveCommand),
Top(ZoneTopCommand),
Watch(ZoneWatchCommand),
UpdateResources(ZoneUpdateResourcesCommand),
}
impl ZoneCommands {
@ -84,6 +88,8 @@ impl ZoneCommands {
ZoneCommands::Top(top) => top.run(client, events).await,
ZoneCommands::Exec(exec) => exec.run(client).await,
ZoneCommands::UpdateResources(update_resources) => update_resources.run(client).await,
}
}
}

View File

@ -1,6 +1,6 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest};
use krata::v1::control::{control_service_client::ControlServiceClient, ResolveZoneIdRequest};
use tonic::{transport::Channel, Request};
@ -14,13 +14,13 @@ pub struct ZoneResolveCommand {
impl ZoneResolveCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let reply = client
.resolve_zone(Request::new(ResolveZoneRequest {
.resolve_zone_id(Request::new(ResolveZoneIdRequest {
name: self.zone.clone(),
}))
.await?
.into_inner();
if let Some(zone) = reply.zone {
println!("{}", zone.id);
if !reply.zone_id.is_empty() {
println!("{}", reply.zone_id);
} else {
std::process::exit(1);
}

View File

@ -24,7 +24,7 @@ use ratatui::{
};
use crate::{
format::zone_status_text,
format::zone_state_text,
metrics::{
lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
},
@ -106,13 +106,13 @@ impl ZoneTopApp {
break;
}
}
};
}
}
Ok(())
}
fn render_frame(&mut self, frame: &mut Frame) {
frame.render_widget(self, frame.size());
frame.render_widget(self, frame.area());
}
fn handle_event(&mut self, event: Event) -> io::Result<()> {
@ -157,7 +157,7 @@ impl Widget for &mut ZoneTopApp {
continue;
};
let Some(ref state) = ms.zone.state else {
let Some(ref status) = ms.zone.status else {
continue;
};
@ -177,7 +177,7 @@ impl Widget for &mut ZoneTopApp {
let row = Row::new(vec![
spec.name.clone(),
ms.zone.id.clone(),
zone_status_text(state.status()),
zone_state_text(status.state()),
memory_total.unwrap_or_default(),
memory_used.unwrap_or_default(),
memory_free.unwrap_or_default(),

View File

@ -0,0 +1,93 @@
use anyhow::{anyhow, Result};
use clap::Parser;
use krata::v1::{
    common::ZoneResourceSpec,
    control::{
        control_service_client::ControlServiceClient, GetZoneRequest, UpdateZoneResourcesRequest,
    },
};
use tonic::{transport::Channel, Request};

use crate::cli::resolve_zone;
// CLI arguments for `zone update-resources`.
// Each numeric flag uses 0 as a sentinel for "keep the zone's currently
// active value"; the sentinel is resolved in run() against active_resources.
// (clap renders help from the explicit `help = ...` attributes, so these
// plain comments do not affect --help output.)
#[derive(Parser)]
#[command(about = "Update the available resources to a zone")]
pub struct ZoneUpdateResourcesCommand {
    // Positional: the target zone, accepted as either its name or its UUID.
    #[arg(help = "Zone to update resources of, either the name or the uuid")]
    zone: String,
    #[arg(
        short = 'C',
        long = "max-cpus",
        default_value_t = 0,
        help = "Maximum vCPUs available to the zone (0 means previous value)"
    )]
    max_cpus: u32,
    #[arg(
        short = 'c',
        long = "target-cpus",
        default_value_t = 0,
        help = "Target vCPUs for the zone to use (0 means previous value)"
    )]
    target_cpus: u32,
    // Memory values are expressed in megabytes (per the help text).
    #[arg(
        short = 'M',
        long = "max-memory",
        default_value_t = 0,
        help = "Maximum memory available to the zone, in megabytes (0 means previous value)"
    )]
    max_memory: u64,
    #[arg(
        short = 'm',
        long = "target-memory",
        default_value_t = 0,
        help = "Target memory for the zone to use, in megabytes (0 means previous value)"
    )]
    target_memory: u64,
}
impl ZoneUpdateResourcesCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let zone_id = resolve_zone(&mut client, &self.zone).await?;
let zone = client
.get_zone(GetZoneRequest { zone_id })
.await?
.into_inner()
.zone
.unwrap_or_default();
let active_resources = zone
.status
.clone()
.unwrap_or_default()
.resource_status
.unwrap_or_default()
.active_resources
.unwrap_or_default();
client
.update_zone_resources(Request::new(UpdateZoneResourcesRequest {
zone_id: zone.id.clone(),
resources: Some(ZoneResourceSpec {
max_memory: if self.max_memory == 0 {
active_resources.max_memory
} else {
self.max_memory
},
target_memory: if self.target_memory == 0 {
active_resources.target_memory
} else {
self.target_memory
},
max_cpus: if self.max_cpus == 0 {
active_resources.max_cpus
} else {
self.max_cpus
},
target_cpus: if self.target_cpus == 0 {
active_resources.target_cpus
} else {
self.target_cpus
},
}),
}))
.await?;
Ok(())
}
}

View File

@ -1,22 +1,22 @@
use anyhow::{anyhow, Result};
use anyhow::Result;
use async_stream::stream;
use crossterm::{
terminal::{disable_raw_mode, enable_raw_mode, is_raw_mode_enabled},
tty::IsTty,
};
use krata::v1::common::ZoneState;
use krata::{
events::EventStream,
v1::{
common::ZoneStatus,
control::{
watch_events_reply::Event, ExecZoneReply, ExecZoneRequest, ZoneConsoleReply,
v1::common::TerminalSize,
v1::control::{
watch_events_reply::Event, ExecInsideZoneReply, ExecInsideZoneRequest, ZoneConsoleReply,
ZoneConsoleRequest,
},
},
};
use log::debug;
use tokio::{
io::{stderr, stdin, stdout, AsyncReadExt, AsyncWriteExt},
select,
task::JoinHandle,
};
use tokio_stream::{Stream, StreamExt};
@ -24,11 +24,19 @@ use tonic::Streaming;
/// Unit struct acting as a namespace for the stdio/console streaming helpers
/// (stdin/console request streams, stdout forwarding, exec output handling).
pub struct StdioConsoleStream;
/// Outcome of one iteration of the select loop in `input_stream_exec`.
enum ExecStdinSelect {
    // A read from stdin completed: Ok(0) signals EOF, Err aborts the loop.
    DataRead(std::io::Result<usize>),
    // The controlling terminal was resized (unix: SIGWINCH / window_change).
    TerminalResize,
}
impl StdioConsoleStream {
pub async fn stdin_stream(zone: String) -> impl Stream<Item = ZoneConsoleRequest> {
pub async fn stdin_stream(
zone: String,
replay_history: bool,
) -> impl Stream<Item = ZoneConsoleRequest> {
let mut stdin = stdin();
stream! {
yield ZoneConsoleRequest { zone_id: zone, data: vec![] };
yield ZoneConsoleRequest { zone_id: zone, replay_history, data: vec![] };
let mut buffer = vec![0u8; 60];
loop {
@ -43,38 +51,118 @@ impl StdioConsoleStream {
if size == 1 && buffer[0] == 0x1d {
break;
}
yield ZoneConsoleRequest { zone_id: String::default(), data };
yield ZoneConsoleRequest { zone_id: String::default(), replay_history, data };
}
}
}
pub async fn stdin_stream_exec(
initial: ExecZoneRequest,
) -> impl Stream<Item = ExecZoneRequest> {
#[cfg(unix)]
pub async fn input_stream_exec(
initial: ExecInsideZoneRequest,
tty: bool,
) -> impl Stream<Item = ExecInsideZoneRequest> {
let mut stdin = stdin();
stream! {
yield initial;
let mut buffer = vec![0u8; 60];
let mut terminal_size_change = if tty {
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::window_change()).ok()
} else {
None
};
let mut stdin_closed = false;
loop {
let size = match stdin.read(&mut buffer).await {
Ok(size) => size,
let selected = if let Some(ref mut terminal_size_change) = terminal_size_change {
if stdin_closed {
select! {
_ = terminal_size_change.recv() => ExecStdinSelect::TerminalResize,
}
} else {
select! {
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
_ = terminal_size_change.recv() => ExecStdinSelect::TerminalResize,
}
}
} else {
select! {
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
}
};
match selected {
ExecStdinSelect::DataRead(result) => {
match result {
Ok(size) => {
let stdin = buffer[0..size].to_vec();
if size == 1 && buffer[0] == 0x1d {
break;
}
stdin_closed = size == 0;
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: None, stdin, stdin_closed, };
},
Err(error) => {
debug!("failed to read stdin: {}", error);
break;
}
};
let data = buffer[0..size].to_vec();
if size == 1 && buffer[0] == 0x1d {
break;
}
yield ExecZoneRequest { zone_id: String::default(), task: None, data };
},
ExecStdinSelect::TerminalResize => {
if let Ok((columns, rows)) = crossterm::terminal::size() {
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: Some(TerminalSize {
rows: rows as u32,
columns: columns as u32,
}), stdin: vec![], stdin_closed: false, };
}
}
}
}
}
}
pub async fn stdout(mut stream: Streaming<ZoneConsoleReply>) -> Result<()> {
if stdin().is_tty() {
/// Non-unix variant of the exec input stream: forwards stdin to the zone but
/// never emits terminal-size updates, since there is no SIGWINCH equivalent
/// wired up here (`_tty` is accepted only for signature parity with the unix
/// version).
///
/// The stream first yields `initial` (the request that starts the exec task),
/// then one `ExecInsideZoneRequest` per stdin read until EOF, a read error,
/// or the 0x1d (Ctrl-]) escape byte.
#[cfg(not(unix))]
pub async fn input_stream_exec(
    initial: ExecInsideZoneRequest,
    _tty: bool,
) -> impl Stream<Item = ExecInsideZoneRequest> {
    let mut stdin = stdin();
    stream! {
        // The first frame carries the task definition; later frames only stdin.
        yield initial;
        let mut buffer = vec![0u8; 60];
        let mut stdin_closed = false;
        loop {
            // Single-branch select mirrors the unix version's structure.
            let selected = select! {
                result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
            };
            match selected {
                ExecStdinSelect::DataRead(result) => {
                    match result {
                        Ok(size) => {
                            let stdin = buffer[0..size].to_vec();
                            // 0x1d is the Ctrl-] escape: detach without sending it.
                            if size == 1 && buffer[0] == 0x1d {
                                break;
                            }
                            // A zero-length read means stdin reached EOF; relay
                            // that to the remote side via stdin_closed.
                            stdin_closed = size == 0;
                            yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: None, stdin, stdin_closed, };
                        },
                        Err(error) => {
                            debug!("failed to read stdin: {}", error);
                            break;
                        }
                    }
                },
                // TerminalResize cannot occur on this platform; keep looping.
                _ => {
                    continue;
                }
            }
        }
    }
}
pub async fn stdout(mut stream: Streaming<ZoneConsoleReply>, raw: bool) -> Result<()> {
if raw && stdin().is_tty() {
enable_raw_mode()?;
StdioConsoleStream::register_terminal_restore_hook()?;
}
@ -90,7 +178,11 @@ impl StdioConsoleStream {
Ok(())
}
pub async fn exec_output(mut stream: Streaming<ExecZoneReply>) -> Result<i32> {
pub async fn exec_output(mut stream: Streaming<ExecInsideZoneReply>, raw: bool) -> Result<i32> {
if raw {
enable_raw_mode()?;
StdioConsoleStream::register_terminal_restore_hook()?;
}
let mut stdout = stdout();
let mut stderr = stderr();
while let Some(reply) = stream.next().await {
@ -109,7 +201,12 @@ impl StdioConsoleStream {
return if reply.error.is_empty() {
Ok(reply.exit_code)
} else {
Err(anyhow!("exec failed: {}", reply.error))
StdioConsoleStream::restore_terminal_mode();
stderr
.write_all(format!("Error: exec failed: {}\n", reply.error).as_bytes())
.await?;
stderr.flush().await?;
Ok(-1)
};
}
}
@ -128,7 +225,7 @@ impl StdioConsoleStream {
continue;
};
let Some(state) = zone.state else {
let Some(status) = zone.status else {
continue;
};
@ -136,12 +233,12 @@ impl StdioConsoleStream {
continue;
}
if let Some(exit_info) = state.exit_info {
return Some(exit_info.code);
if let Some(exit_status) = status.exit_status {
return Some(exit_status.code);
}
let status = state.status();
if status == ZoneStatus::Destroying || status == ZoneStatus::Destroyed {
let state = status.state();
if state == ZoneState::Destroying || state == ZoneState::Destroyed {
return Some(10);
}
}

View File

@ -3,11 +3,12 @@ use std::{collections::HashMap, time::Duration};
use anyhow::Result;
use fancy_duration::FancyDuration;
use human_bytes::human_bytes;
use krata::v1::common::{Zone, ZoneMetricFormat, ZoneMetricNode, ZoneStatus};
use prost_reflect::{DynamicMessage, ReflectMessage};
use prost_types::Value;
use termtree::Tree;
use krata::v1::common::{Zone, ZoneMetricFormat, ZoneMetricNode, ZoneState};
pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
Ok(DynamicMessage::decode(
proto.descriptor(),
@ -75,30 +76,30 @@ pub fn kv2line(map: HashMap<String, String>) -> String {
.join(" ")
}
pub fn zone_status_text(status: ZoneStatus) -> String {
pub fn zone_state_text(status: ZoneState) -> String {
match status {
ZoneStatus::Starting => "starting",
ZoneStatus::Started => "started",
ZoneStatus::Destroying => "destroying",
ZoneStatus::Destroyed => "destroyed",
ZoneStatus::Exited => "exited",
ZoneStatus::Failed => "failed",
ZoneState::Creating => "creating",
ZoneState::Created => "created",
ZoneState::Destroying => "destroying",
ZoneState::Destroyed => "destroyed",
ZoneState::Exited => "exited",
ZoneState::Failed => "failed",
_ => "unknown",
}
.to_string()
}
pub fn zone_simple_line(zone: &Zone) -> String {
let state = zone_status_text(
zone.state
let state = zone_state_text(
zone.status
.as_ref()
.map(|x| x.status())
.unwrap_or(ZoneStatus::Unknown),
.map(|x| x.state())
.unwrap_or(ZoneState::Unknown),
);
let name = zone.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
let network = zone.state.as_ref().and_then(|x| x.network.as_ref());
let ipv4 = network.map(|x| x.zone_ipv4.as_str()).unwrap_or("");
let ipv6 = network.map(|x| x.zone_ipv6.as_str()).unwrap_or("");
let network_status = zone.status.as_ref().and_then(|x| x.network_status.as_ref());
let ipv4 = network_status.map(|x| x.zone_ipv4.as_str()).unwrap_or("");
let ipv6 = network_status.map(|x| x.zone_ipv6.as_str()).unwrap_or("");
format!("{}\t{}\t{}\t{}\t{}", zone.id, state, name, ipv4, ipv6)
}

View File

@ -1,8 +1,10 @@
use crate::format::metrics_value_pretty;
use anyhow::Result;
use krata::v1::common::ZoneState;
use krata::{
events::EventStream,
v1::{
common::{Zone, ZoneMetricNode, ZoneStatus},
common::{Zone, ZoneMetricNode},
control::{
control_service_client::ControlServiceClient, watch_events_reply::Event,
ListZonesRequest, ReadZoneMetricsRequest,
@ -19,8 +21,6 @@ use tokio::{
};
use tonic::transport::Channel;
use crate::format::metrics_value_pretty;
pub struct MetricState {
pub zone: Zone,
pub root: Option<ZoneMetricNode>,
@ -86,11 +86,11 @@ impl MultiMetricCollector {
let Some(zone) = changed.zone else {
continue;
};
let Some(ref state) = zone.state else {
let Some(ref status) = zone.status else {
continue;
};
zones.retain(|x| x.id != zone.id);
if state.status() != ZoneStatus::Destroying {
if status.state() != ZoneState::Destroying {
zones.push(zone);
}
false
@ -112,11 +112,11 @@ impl MultiMetricCollector {
let mut metrics = Vec::new();
for zone in &zones {
let Some(ref state) = zone.state else {
let Some(ref status) = zone.status else {
continue;
};
if state.status() != ZoneStatus::Started {
if status.state() != ZoneState::Created {
continue;
}

View File

@ -9,6 +9,7 @@ edition = "2021"
resolver = "2"
[dependencies]
krata-advmac = { workspace = true }
anyhow = { workspace = true }
async-stream = { workspace = true }
async-trait = { workspace = true }
@ -17,14 +18,16 @@ circular-buffer = { workspace = true }
clap = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
krata = { path = "../krata", version = "^0.0.14" }
krata-oci = { path = "../oci", version = "^0.0.14" }
krata-runtime = { path = "../runtime", version = "^0.0.14" }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.20" }
krata-oci = { path = "../oci", version = "^0.0.20" }
krata-runtime = { path = "../runtime", version = "^0.0.20" }
log = { workspace = true }
prost = { workspace = true }
redb = { workspace = true }
scopeguard = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
signal-hook = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }

View File

@ -15,7 +15,7 @@ use kratad::command::DaemonCommand;
async fn main() -> Result<()> {
let mut builder = env_logger::Builder::new();
builder
.filter_level(LevelFilter::Trace)
.filter_level(LevelFilter::Info)
.parse_default_env()
.filter(Some("backhand::filesystem::writer"), LevelFilter::Warn);

View File

@ -10,6 +10,8 @@ pub struct DaemonConfig {
pub oci: OciConfig,
#[serde(default)]
pub pci: DaemonPciConfig,
#[serde(default = "default_network")]
pub network: DaemonNetworkConfig,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
@ -49,15 +51,74 @@ pub enum DaemonPciDeviceRdmReservePolicy {
Relaxed,
}
/// Network section of the daemon configuration file.
/// Every field has a serde default so a missing or partial `[network]` table
/// is filled from the `default_network_*` helpers below.
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonNetworkConfig {
    /// Nameservers handed out to zones; defaults to Cloudflare's public
    /// resolvers (IPv4 and IPv6).
    #[serde(default = "default_network_nameservers")]
    pub nameservers: Vec<String>,
    /// IPv4 addressing configuration (defaults to 10.75.0.0/16).
    #[serde(default = "default_network_ipv4")]
    pub ipv4: DaemonIpv4NetworkConfig,
    /// IPv6 addressing configuration (defaults to fdd4:1476:6c7e::/48).
    #[serde(default = "default_network_ipv6")]
    pub ipv6: DaemonIpv6NetworkConfig,
}
/// IPv4 sub-section of the daemon network configuration.
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonIpv4NetworkConfig {
    /// CIDR subnet zones draw IPv4 addresses from; defaults to 10.75.0.0/16.
    #[serde(default = "default_network_ipv4_subnet")]
    pub subnet: String,
}
/// IPv6 sub-section of the daemon network configuration.
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonIpv6NetworkConfig {
    /// CIDR subnet zones draw IPv6 addresses from; defaults to
    /// fdd4:1476:6c7e::/48 (a ULA prefix).
    #[serde(default = "default_network_ipv6_subnet")]
    pub subnet: String,
}
/// Build the network section used when the config file omits it entirely.
fn default_network() -> DaemonNetworkConfig {
    let nameservers = default_network_nameservers();
    let ipv4 = default_network_ipv4();
    let ipv6 = default_network_ipv6();
    DaemonNetworkConfig {
        nameservers,
        ipv4,
        ipv6,
    }
}
/// Default DNS servers offered to zones: Cloudflare's public resolvers,
/// both IPv4 and IPv6.
fn default_network_nameservers() -> Vec<String> {
    [
        "1.1.1.1",
        "1.0.0.1",
        "2606:4700:4700::1111",
        "2606:4700:4700::1001",
    ]
    .into_iter()
    .map(String::from)
    .collect()
}
/// IPv4 network settings used when the config file omits them.
fn default_network_ipv4() -> DaemonIpv4NetworkConfig {
    let subnet = default_network_ipv4_subnet();
    DaemonIpv4NetworkConfig { subnet }
}
/// The default IPv4 subnet zones are allocated addresses from.
fn default_network_ipv4_subnet() -> String {
    String::from("10.75.0.0/16")
}
/// IPv6 network settings used when the config file omits them.
fn default_network_ipv6() -> DaemonIpv6NetworkConfig {
    let subnet = default_network_ipv6_subnet();
    DaemonIpv6NetworkConfig { subnet }
}
/// The default IPv6 subnet zones are allocated addresses from (a ULA prefix).
fn default_network_ipv6_subnet() -> String {
    String::from("fdd4:1476:6c7e::/48")
}
impl DaemonConfig {
pub async fn load(path: &Path) -> Result<DaemonConfig> {
if path.exists() {
if !path.exists() {
let config: DaemonConfig = toml::from_str("")?;
let content = toml::to_string_pretty(&config)?;
fs::write(&path, content).await?;
}
let content = fs::read_to_string(path).await?;
let config: DaemonConfig = toml::from_str(&content)?;
Ok(config)
} else {
fs::write(&path, "").await?;
Ok(DaemonConfig::default())
}
}
}

View File

@ -24,7 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
#[derive(Clone)]
pub struct DaemonConsoleHandle {
glt: ZoneLookupTable,
zlt: ZoneLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
sender: Sender<(u32, Vec<u8>)>,
@ -57,7 +57,7 @@ impl DaemonConsoleHandle {
uuid: Uuid,
sender: Sender<Vec<u8>>,
) -> Result<DaemonConsoleAttachHandle> {
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
let Some(domid) = self.zlt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
let buffers = self.buffers.lock().await;
@ -84,7 +84,7 @@ impl Drop for DaemonConsoleHandle {
}
pub struct DaemonConsole {
glt: ZoneLookupTable,
zlt: ZoneLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
receiver: Receiver<(u32, Option<Vec<u8>>)>,
@ -93,14 +93,14 @@ pub struct DaemonConsole {
}
impl DaemonConsole {
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonConsole> {
pub async fn new(zlt: ZoneLookupTable) -> Result<DaemonConsole> {
let (service, sender, receiver) =
ChannelService::new("krata-console".to_string(), Some(0)).await?;
let task = service.launch().await?;
let listeners = Arc::new(Mutex::new(HashMap::new()));
let buffers = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonConsole {
glt,
zlt,
listeners,
buffers,
receiver,
@ -110,7 +110,7 @@ impl DaemonConsole {
}
pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
let glt = self.glt.clone();
let zlt = self.zlt.clone();
let listeners = self.listeners.clone();
let buffers = self.buffers.clone();
let sender = self.sender.clone();
@ -120,7 +120,7 @@ impl DaemonConsole {
}
});
Ok(DaemonConsoleHandle {
glt,
zlt,
listeners,
buffers,
sender,

View File

@ -1,611 +0,0 @@
use async_stream::try_stream;
use futures::Stream;
use krata::{
idm::internal::{
exec_stream_request_update::Update, request::Request as IdmRequestType,
response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
},
v1::{
common::{OciImageFormat, Zone, ZoneState, ZoneStatus},
control::{
control_service_server::ControlService, CreateZoneReply, CreateZoneRequest,
DestroyZoneReply, DestroyZoneRequest, DeviceInfo, ExecZoneReply, ExecZoneRequest,
HostCpuTopologyInfo, HostCpuTopologyReply, HostCpuTopologyRequest,
HostPowerManagementPolicy, IdentifyHostReply, IdentifyHostRequest, ListDevicesReply,
ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
ReadZoneMetricsReply, ReadZoneMetricsRequest, ResolveZoneReply, ResolveZoneRequest,
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest, ZoneConsoleReply,
ZoneConsoleRequest,
},
},
};
use krataoci::{
name::ImageName,
packer::{service::OciPackerService, OciPackedFormat, OciPackedImage},
progress::{OciProgress, OciProgressContext},
};
use kratart::Runtime;
use std::{pin::Pin, str::FromStr};
use tokio::{
select,
sync::mpsc::{channel, Sender},
task::JoinError,
};
use tokio_stream::StreamExt;
use tonic::{Request, Response, Status, Streaming};
use uuid::Uuid;
use crate::{
command::DaemonCommand, console::DaemonConsoleHandle, db::ZoneStore,
devices::DaemonDeviceManager, event::DaemonEventContext, idm::DaemonIdmHandle,
metrics::idm_metric_to_api, oci::convert_oci_progress, zlt::ZoneLookupTable,
};
/// Adapter carrying an error message across the anyhow -> tonic boundary;
/// converted into a gRPC `Status` via the `From` impls below.
pub struct ApiError {
    message: String,
}
impl From<anyhow::Error> for ApiError {
    /// Collapse any anyhow error into its display string.
    fn from(value: anyhow::Error) -> Self {
        let message = value.to_string();
        ApiError { message }
    }
}
impl From<ApiError> for Status {
fn from(value: ApiError) -> Self {
Status::unknown(value.message)
}
}
/// Implementation of the krata control-plane gRPC service.
/// Cloneable: every field is a handle/clone-friendly wrapper.
#[derive(Clone)]
pub struct DaemonControlService {
    // Zone lookup table mapping uuids <-> domids (used by identify_host
    // and snoop_idm below).
    glt: ZoneLookupTable,
    // Host device manager; backs list_devices.
    devices: DaemonDeviceManager,
    // Event bus subscribed to by watch_events.
    events: DaemonEventContext,
    // Console multiplexer; backs attach_zone_console.
    console: DaemonConsoleHandle,
    // IDM handle for per-zone clients (exec, metrics) and snooping.
    idm: DaemonIdmHandle,
    // Persistent zone store (read/update/list).
    zones: ZoneStore,
    // Notifying this channel wakes the reconciler for the given zone uuid.
    zone_reconciler_notify: Sender<Uuid>,
    // OCI image packer service; backs pull_image.
    packer: OciPackerService,
    // Runtime handle — not referenced by the methods visible here;
    // presumably used by host topology/power calls. TODO confirm.
    runtime: Runtime,
}
impl DaemonControlService {
    /// Construct the control service from its pre-built collaborators.
    /// Pure field plumbing: no I/O or validation happens here.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        glt: ZoneLookupTable,
        devices: DaemonDeviceManager,
        events: DaemonEventContext,
        console: DaemonConsoleHandle,
        idm: DaemonIdmHandle,
        zones: ZoneStore,
        zone_reconciler_notify: Sender<Uuid>,
        packer: OciPackerService,
        runtime: Runtime,
    ) -> Self {
        Self {
            glt,
            devices,
            events,
            console,
            idm,
            zones,
            zone_reconciler_notify,
            packer,
            runtime,
        }
    }
}
/// Which side of an attached console became ready in the select loop of
/// `attach_zone_console`.
enum ConsoleDataSelect {
    // Bytes emitted by the zone's console (None = console channel closed).
    Read(Option<Vec<u8>>),
    // Next request frame from the client (None = client stream ended).
    Write(Option<Result<ZoneConsoleRequest, Status>>),
}
/// Which event woke the `pull_image` progress loop.
enum PullImageSelect {
    // Progress update from the OCI packer (None when the watch channel closed).
    Progress(Option<OciProgress>),
    // The background pack task completed; outer Result is the tokio JoinError.
    Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
}
#[tonic::async_trait]
impl ControlService for DaemonControlService {
    // Boxed, pinned reply-stream types required by the generated
    // ControlService trait for each server-streaming RPC.
    type ExecZoneStream =
        Pin<Box<dyn Stream<Item = Result<ExecZoneReply, Status>> + Send + 'static>>;
    type AttachZoneConsoleStream =
        Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;
    type PullImageStream =
        Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
    type WatchEventsStream =
        Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>;
    type SnoopIdmStream =
        Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;
async fn identify_host(
&self,
request: Request<IdentifyHostRequest>,
) -> Result<Response<IdentifyHostReply>, Status> {
let _ = request.into_inner();
Ok(Response::new(IdentifyHostReply {
host_domid: self.glt.host_domid(),
host_uuid: self.glt.host_uuid().to_string(),
krata_version: DaemonCommand::version(),
}))
}
    /// Register a new zone in the store (state: Starting) and wake the
    /// reconciler; the reconciler performs the actual creation asynchronously.
    async fn create_zone(
        &self,
        request: Request<CreateZoneRequest>,
    ) -> Result<Response<CreateZoneReply>, Status> {
        let request = request.into_inner();
        let Some(spec) = request.spec else {
            return Err(ApiError {
                message: "zone spec not provided".to_string(),
            }
            .into());
        };
        // The daemon, not the caller, picks the zone id.
        let uuid = Uuid::new_v4();
        self.zones
            .update(
                uuid,
                Zone {
                    id: uuid.to_string(),
                    state: Some(ZoneState {
                        status: ZoneStatus::Starting.into(),
                        network: None,
                        exit_info: None,
                        error_info: None,
                        host: self.glt.host_uuid().to_string(),
                        // u32::MAX marks "no domid assigned yet" — presumably
                        // filled in by the reconciler; TODO confirm.
                        domid: u32::MAX,
                    }),
                    spec: Some(spec),
                },
            )
            .await
            .map_err(ApiError::from)?;
        // Nudge the reconciler to act on the newly stored zone.
        self.zone_reconciler_notify
            .send(uuid)
            .await
            .map_err(|x| ApiError {
                message: x.to_string(),
            })?;
        Ok(Response::new(CreateZoneReply {
            zone_id: uuid.to_string(),
        }))
    }
    /// Run a task inside a zone over the IDM exec-stream protocol.
    ///
    /// The first client frame must carry the task definition (command, env,
    /// working directory); subsequent frames carry stdin bytes. Replies relay
    /// stdout/stderr/exit information from the zone until the IDM stream ends.
    async fn exec_zone(
        &self,
        request: Request<Streaming<ExecZoneRequest>>,
    ) -> Result<Response<Self::ExecZoneStream>, Status> {
        let mut input = request.into_inner();
        // The protocol requires an initial frame with the task attached.
        let Some(request) = input.next().await else {
            return Err(ApiError {
                message: "expected to have at least one request".to_string(),
            }
            .into());
        };
        let request = request?;
        let Some(task) = request.task else {
            return Err(ApiError {
                message: "task is missing".to_string(),
            }
            .into());
        };
        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
            message: error.to_string(),
        })?;
        // Per-zone IDM client used to talk to the agent inside the zone.
        let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
            message: error.to_string(),
        })?;
        // Translate the gRPC task into the IDM "exec start" request.
        let idm_request = IdmRequest {
            request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                update: Some(Update::Start(ExecStreamRequestStart {
                    environment: task
                        .environment
                        .into_iter()
                        .map(|x| ExecEnvVar {
                            key: x.key,
                            value: x.value,
                        })
                        .collect(),
                    command: task.command,
                    working_directory: task.working_directory,
                })),
            })),
        };
        let output = try_stream! {
            let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
                message: x.to_string(),
            })?;
            // Pump both directions: client frames -> IDM stdin updates, and
            // IDM exec responses -> gRPC replies.
            loop {
                select! {
                    x = input.next() => if let Some(update) = x {
                        let update: Result<ExecZoneRequest, Status> = update.map_err(|error| ApiError {
                            message: error.to_string()
                        }.into());
                        // Malformed client frames are silently dropped here;
                        // only non-empty data is forwarded as stdin.
                        if let Ok(update) = update {
                            if !update.data.is_empty() {
                                let _ = handle.update(IdmRequest {
                                    request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                                        update: Some(Update::Stdin(ExecStreamRequestStdin {
                                            data: update.data,
                                        })),
                                    }))}).await;
                            }
                        }
                    },
                    x = handle.receiver.recv() => match x {
                        Some(response) => {
                            // Any non-exec-stream response terminates the RPC.
                            let Some(IdmResponseType::ExecStream(update)) = response.response else {
                                break;
                            };
                            let reply = ExecZoneReply {
                                exited: update.exited,
                                error: update.error,
                                exit_code: update.exit_code,
                                stdout: update.stdout,
                                stderr: update.stderr
                            };
                            yield reply;
                        },
                        None => {
                            break;
                        }
                    }
                };
            }
        };
        Ok(Response::new(Box::pin(output) as Self::ExecZoneStream))
    }
async fn destroy_zone(
&self,
request: Request<DestroyZoneRequest>,
) -> Result<Response<DestroyZoneReply>, Status> {
let request = request.into_inner();
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
message: error.to_string(),
})?;
let Some(mut zone) = self.zones.read(uuid).await.map_err(ApiError::from)? else {
return Err(ApiError {
message: "zone not found".to_string(),
}
.into());
};
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
if zone.state.as_ref().unwrap().status() == ZoneStatus::Destroyed {
return Err(ApiError {
message: "zone already destroyed".to_string(),
}
.into());
}
zone.state.as_mut().unwrap().status = ZoneStatus::Destroying.into();
self.zones
.update(uuid, zone)
.await
.map_err(ApiError::from)?;
self.zone_reconciler_notify
.send(uuid)
.await
.map_err(|x| ApiError {
message: x.to_string(),
})?;
Ok(Response::new(DestroyZoneReply {}))
}
async fn list_zones(
&self,
request: Request<ListZonesRequest>,
) -> Result<Response<ListZonesReply>, Status> {
let _ = request.into_inner();
let zones = self.zones.list().await.map_err(ApiError::from)?;
let zones = zones.into_values().collect::<Vec<Zone>>();
Ok(Response::new(ListZonesReply { zones }))
}
async fn resolve_zone(
&self,
request: Request<ResolveZoneRequest>,
) -> Result<Response<ResolveZoneReply>, Status> {
let request = request.into_inner();
let zones = self.zones.list().await.map_err(ApiError::from)?;
let zones = zones
.into_values()
.filter(|x| {
let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
(!request.name.is_empty() && comparison_spec.name == request.name)
|| x.id == request.name
})
.collect::<Vec<Zone>>();
Ok(Response::new(ResolveZoneReply {
zone: zones.first().cloned(),
}))
}
    /// Attach the client to a zone's console: the first client frame selects
    /// the zone, then console output and client input are pumped concurrently.
    async fn attach_zone_console(
        &self,
        request: Request<Streaming<ZoneConsoleRequest>>,
    ) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
        let mut input = request.into_inner();
        // The first frame is required: it names the zone to attach to.
        let Some(request) = input.next().await else {
            return Err(ApiError {
                message: "expected to have at least one request".to_string(),
            }
            .into());
        };
        let request = request?;
        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
            message: error.to_string(),
        })?;
        // Console output arrives via this bounded channel (capacity 100).
        let (sender, mut receiver) = channel(100);
        let console = self
            .console
            .attach(uuid, sender)
            .await
            .map_err(|error| ApiError {
                message: format!("failed to attach to console: {}", error),
            })?;
        let output = try_stream! {
            // Replay the buffered console history first.
            yield ZoneConsoleReply { data: console.initial.clone(), };
            loop {
                // Race console output against the next client frame.
                let what = select! {
                    x = receiver.recv() => ConsoleDataSelect::Read(x),
                    x = input.next() => ConsoleDataSelect::Write(x),
                };
                match what {
                    ConsoleDataSelect::Read(Some(data)) => {
                        yield ZoneConsoleReply { data, };
                    },
                    // Console channel closed: the attachment is over.
                    ConsoleDataSelect::Read(None) => {
                        break;
                    }
                    ConsoleDataSelect::Write(Some(request)) => {
                        let request = request?;
                        // Empty frames are keep-alive/no-ops; only forward data.
                        if !request.data.is_empty() {
                            console.send(request.data).await.map_err(|error| ApiError {
                                message: error.to_string(),
                            })?;
                        }
                    },
                    // Client hung up.
                    ConsoleDataSelect::Write(None) => {
                        break;
                    }
                }
            }
        };
        Ok(Response::new(
            Box::pin(output) as Self::AttachZoneConsoleStream
        ))
    }
    /// Fetch the metrics tree from a zone's in-guest agent over IDM and
    /// convert it to the public API representation.
    async fn read_zone_metrics(
        &self,
        request: Request<ReadZoneMetricsRequest>,
    ) -> Result<Response<ReadZoneMetricsReply>, Status> {
        let request = request.into_inner();
        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
            message: error.to_string(),
        })?;
        let client = self.idm.client(uuid).await.map_err(|error| ApiError {
            message: error.to_string(),
        })?;
        let response = client
            .send(IdmRequest {
                request: Some(IdmRequestType::Metrics(MetricsRequest {})),
            })
            .await
            .map_err(|error| ApiError {
                message: error.to_string(),
            })?;
        // A non-metrics response yields an empty (default) reply rather
        // than an error.
        let mut reply = ReadZoneMetricsReply::default();
        if let Some(IdmResponseType::Metrics(metrics)) = response.response {
            reply.root = metrics.root.map(idm_metric_to_api);
        }
        Ok(Response::new(reply))
    }
    /// Pull and pack an OCI image, streaming packer progress to the client
    /// and finishing with a final reply carrying the digest and format.
    async fn pull_image(
        &self,
        request: Request<PullImageRequest>,
    ) -> Result<Response<Self::PullImageStream>, Status> {
        let request = request.into_inner();
        let name = ImageName::parse(&request.image).map_err(|err| ApiError {
            message: err.to_string(),
        })?;
        // Unknown format defaults to squashfs.
        let format = match request.format() {
            OciImageFormat::Unknown => OciPackedFormat::Squashfs,
            OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
            OciImageFormat::Erofs => OciPackedFormat::Erofs,
            OciImageFormat::Tar => OciPackedFormat::Tar,
        };
        // Watch-style channel the packer publishes progress into.
        let (context, mut receiver) = OciProgressContext::create();
        let our_packer = self.packer.clone();
        let output = try_stream! {
            // Run the pack on a background task so progress can stream
            // while it works.
            let mut task = tokio::task::spawn(async move {
                our_packer.request(name, format, request.overwrite_cache, request.update, context).await
            });
            let abort_handle = task.abort_handle();
            // If the client drops the stream, abort the background pack.
            let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
                handle.abort();
            });
            loop {
                // Race the next progress change against task completion.
                let what = select! {
                    x = receiver.changed() => match x {
                        Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
                        Err(_) => PullImageSelect::Progress(None),
                    },
                    x = &mut task => PullImageSelect::Completed(x),
                };
                match what {
                    PullImageSelect::Progress(Some(progress)) => {
                        // Intermediate replies carry progress only; the digest
                        // and format fields stay empty/unknown until completion.
                        let reply = PullImageReply {
                            progress: Some(convert_oci_progress(progress)),
                            digest: String::new(),
                            format: OciImageFormat::Unknown.into(),
                        };
                        yield reply;
                    },
                    PullImageSelect::Completed(result) => {
                        // First ? unwraps the JoinError, second the pack error.
                        let result = result.map_err(|err| ApiError {
                            message: err.to_string(),
                        })?;
                        let packed = result.map_err(|err| ApiError {
                            message: err.to_string(),
                        })?;
                        let reply = PullImageReply {
                            progress: None,
                            digest: packed.digest,
                            format: match packed.format {
                                OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
                                OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
                                OciPackedFormat::Tar => OciImageFormat::Tar.into(),
                            },
                        };
                        yield reply;
                        break;
                    },
                    // Progress channel closed without completion: keep waiting
                    // for the task itself.
                    _ => {
                        continue;
                    }
                }
            }
        };
        Ok(Response::new(Box::pin(output) as Self::PullImageStream))
    }
/// Streams daemon events to the caller until the broadcast channel closes.
async fn watch_events(
    &self,
    request: Request<WatchEventsRequest>,
) -> Result<Response<Self::WatchEventsStream>, Status> {
    // The request carries no parameters; consume and discard it.
    drop(request.into_inner());
    let mut subscription = self.events.subscribe();
    let stream = try_stream! {
        // recv() returning Err means the event sender has closed; end the stream.
        loop {
            let Ok(event) = subscription.recv().await else {
                break;
            };
            yield WatchEventsReply { event: Some(event) };
        }
    };
    Ok(Response::new(Box::pin(stream) as Self::WatchEventsStream))
}
/// Mirrors raw IDM traffic to the caller, labeling each packet with zone UUIDs.
async fn snoop_idm(
    &self,
    request: Request<SnoopIdmRequest>,
) -> Result<Response<Self::SnoopIdmStream>, Status> {
    drop(request.into_inner());
    let mut subscription = self.idm.snoop();
    let lookup = self.glt.clone();
    let stream = try_stream! {
        while let Ok(message) = subscription.recv().await {
            // Skip packets whose source domain cannot be mapped to a zone UUID.
            let Some(source) = lookup.lookup_uuid_by_domid(message.from).await else {
                continue;
            };
            // Likewise for the destination domain.
            let Some(destination) = lookup.lookup_uuid_by_domid(message.to).await else {
                continue;
            };
            yield SnoopIdmReply {
                from: source.to_string(),
                to: destination.to_string(),
                packet: Some(message.packet),
            };
        }
    };
    Ok(Response::new(Box::pin(stream) as Self::SnoopIdmStream))
}
async fn list_devices(
&self,
request: Request<ListDevicesRequest>,
) -> Result<Response<ListDevicesReply>, Status> {
let _ = request.into_inner();
let mut devices = Vec::new();
let state = self.devices.copy().await.map_err(|error| ApiError {
message: error.to_string(),
})?;
for (name, state) in state {
devices.push(DeviceInfo {
name,
claimed: state.owner.is_some(),
owner: state.owner.map(|x| x.to_string()).unwrap_or_default(),
});
}
Ok(Response::new(ListDevicesReply { devices }))
}
/// Reports the host's physical CPU topology.
async fn get_host_cpu_topology(
    &self,
    request: Request<HostCpuTopologyRequest>,
) -> Result<Response<HostCpuTopologyReply>, Status> {
    let _ = request.into_inner();
    // The power-management context provides the CPU topology query.
    let power = self
        .runtime
        .power_management_context()
        .await
        .map_err(ApiError::from)?;
    let topology = power.cpu_topology().await.map_err(ApiError::from)?;
    let cpus = topology
        .into_iter()
        .map(|cpu| HostCpuTopologyInfo {
            core: cpu.core,
            socket: cpu.socket,
            node: cpu.node,
            thread: cpu.thread,
            class: cpu.class as i32,
        })
        .collect();
    Ok(Response::new(HostCpuTopologyReply { cpus }))
}
/// Applies the requested SMT awareness and scheduler policy to the host,
/// echoing the applied policy back to the caller.
async fn set_host_power_management_policy(
    &self,
    request: Request<HostPowerManagementPolicy>,
) -> Result<Response<HostPowerManagementPolicy>, Status> {
    let policy = request.into_inner();
    let power = self
        .runtime
        .power_management_context()
        .await
        .map_err(ApiError::from)?;
    // Apply SMT awareness first, then the scheduler policy.
    power
        .set_smt_policy(policy.smt_awareness)
        .await
        .map_err(ApiError::from)?;
    power
        .set_scheduler_policy(&policy.scheduler)
        .await
        .map_err(ApiError::from)?;
    Ok(Response::new(HostPowerManagementPolicy {
        scheduler: policy.scheduler.clone(),
        smt_awareness: policy.smt_awareness,
    }))
}
}

View File

@ -0,0 +1,84 @@
use std::pin::Pin;
use std::str::FromStr;
use anyhow::{anyhow, Result};
use async_stream::try_stream;
use tokio::select;
use tokio::sync::mpsc::channel;
use tokio_stream::{Stream, StreamExt};
use tonic::{Status, Streaming};
use uuid::Uuid;
use krata::v1::control::{ZoneConsoleReply, ZoneConsoleRequest};
use crate::console::DaemonConsoleHandle;
use crate::control::ApiError;
/// Outcome of racing a console read against the client's request stream.
enum ConsoleDataSelect {
    /// Bytes read from the zone console; `None` means the console channel closed.
    Read(Option<Vec<u8>>),
    /// Next client message from the gRPC stream; `None` means the client hung up.
    Write(Option<Result<ZoneConsoleRequest, Status>>),
}
/// Handles the AttachZoneConsole streaming RPC: bridges a zone's console with
/// a bidirectional gRPC stream.
pub struct AttachZoneConsoleRpc {
    console: DaemonConsoleHandle,
}

impl AttachZoneConsoleRpc {
    pub fn new(console: DaemonConsoleHandle) -> Self {
        Self { console }
    }

    /// Attaches to the console of the zone named by the first client message,
    /// then pumps console output to the client and client input to the console
    /// until either side closes.
    pub async fn process(
        self,
        mut input: Streaming<ZoneConsoleRequest>,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>>
    {
        // The first message selects the zone and options; it must be present.
        let Some(request) = input.next().await else {
            return Err(anyhow!("expected to have at least one request"));
        };
        let request = request?;
        let uuid = Uuid::from_str(&request.zone_id)?;
        // Buffered channel through which console output is delivered to this task.
        let (sender, mut receiver) = channel(100);
        let console = self
            .console
            .attach(uuid, sender)
            .await
            .map_err(|error| anyhow!("failed to attach to console: {}", error))?;
        let output = try_stream! {
            // Optionally replay console history captured before we attached.
            if request.replay_history {
                yield ZoneConsoleReply { data: console.initial.clone(), };
            }
            loop {
                // Race console output against further client input.
                let what = select! {
                    x = receiver.recv() => ConsoleDataSelect::Read(x),
                    x = input.next() => ConsoleDataSelect::Write(x),
                };
                match what {
                    ConsoleDataSelect::Read(Some(data)) => {
                        yield ZoneConsoleReply { data, };
                    },
                    // Console channel closed: end the stream.
                    ConsoleDataSelect::Read(None) => {
                        break;
                    }
                    ConsoleDataSelect::Write(Some(request)) => {
                        let request = request?;
                        // Only forward non-empty payloads to the console.
                        if !request.data.is_empty() {
                            console.send(request.data).await.map_err(|error| ApiError {
                                message: error.to_string(),
                            })?;
                        }
                    },
                    // Client hung up: end the stream.
                    ConsoleDataSelect::Write(None) => {
                        break;
                    }
                }
            }
        };
        Ok(Box::pin(output))
    }
}

View File

@ -0,0 +1,56 @@
use crate::db::zone::ZoneStore;
use crate::zlt::ZoneLookupTable;
use anyhow::{anyhow, Result};
use krata::v1::common::{Zone, ZoneState, ZoneStatus};
use krata::v1::control::{CreateZoneReply, CreateZoneRequest};
use tokio::sync::mpsc::Sender;
use uuid::Uuid;
/// Handles the CreateZone RPC: persists a new zone record and wakes the reconciler.
pub struct CreateZoneRpc {
    zones: ZoneStore,
    zlt: ZoneLookupTable,
    zone_reconciler_notify: Sender<Uuid>,
}

impl CreateZoneRpc {
    pub fn new(
        zones: ZoneStore,
        zlt: ZoneLookupTable,
        zone_reconciler_notify: Sender<Uuid>,
    ) -> Self {
        Self {
            zones,
            zlt,
            zone_reconciler_notify,
        }
    }

    /// Stores a new zone record in the `Creating` state and notifies the
    /// reconciler, returning the freshly generated zone id.
    pub async fn process(self, request: CreateZoneRequest) -> Result<CreateZoneReply> {
        let spec = match request.spec {
            Some(spec) => spec,
            None => return Err(anyhow!("zone spec not provided")),
        };
        let uuid = Uuid::new_v4();
        // Seed the status; domid u32::MAX marks "not yet assigned".
        let status = ZoneStatus {
            state: ZoneState::Creating.into(),
            network_status: None,
            exit_status: None,
            error_status: None,
            resource_status: None,
            host: self.zlt.host_uuid().to_string(),
            domid: u32::MAX,
        };
        let record = Zone {
            id: uuid.to_string(),
            status: Some(status),
            spec: Some(spec),
        };
        self.zones.update(uuid, record).await?;
        // Wake the reconciler so the zone actually gets created.
        self.zone_reconciler_notify.send(uuid).await?;
        Ok(CreateZoneReply {
            zone_id: uuid.to_string(),
        })
    }
}

View File

@ -0,0 +1,42 @@
use std::str::FromStr;
use anyhow::{anyhow, Result};
use tokio::sync::mpsc::Sender;
use uuid::Uuid;
use krata::v1::common::ZoneState;
use krata::v1::control::{DestroyZoneReply, DestroyZoneRequest};
use crate::db::zone::ZoneStore;
/// Handles the DestroyZone RPC: marks a zone for destruction and wakes the reconciler.
pub struct DestroyZoneRpc {
    zones: ZoneStore,
    zone_reconciler_notify: Sender<Uuid>,
}

impl DestroyZoneRpc {
    pub fn new(zones: ZoneStore, zone_reconciler_notify: Sender<Uuid>) -> Self {
        Self {
            zones,
            zone_reconciler_notify,
        }
    }

    /// Transitions the zone to the `Destroying` state and notifies the reconciler.
    ///
    /// Errors if the zone id is not a UUID, the zone does not exist, or the
    /// zone has already been destroyed.
    pub async fn process(self, request: DestroyZoneRequest) -> Result<DestroyZoneReply> {
        let uuid = Uuid::from_str(&request.zone_id)?;
        let Some(mut zone) = self.zones.read(uuid).await? else {
            return Err(anyhow!("zone not found"));
        };
        // Normalize a missing status to the default instead of cloning through
        // Option and unwrapping repeatedly afterwards.
        let mut status = zone.status.take().unwrap_or_default();
        if status.state() == ZoneState::Destroyed {
            return Err(anyhow!("zone already destroyed"));
        }
        status.state = ZoneState::Destroying.into();
        zone.status = Some(status);
        self.zones.update(uuid, zone).await?;
        self.zone_reconciler_notify.send(uuid).await?;
        Ok(DestroyZoneReply {})
    }
}

View File

@ -0,0 +1,133 @@
use std::pin::Pin;
use std::str::FromStr;
use anyhow::{anyhow, Result};
use async_stream::try_stream;
use tokio::select;
use tokio_stream::{Stream, StreamExt};
use tonic::{Status, Streaming};
use uuid::Uuid;
use krata::idm::internal::Request;
use krata::{
idm::internal::{
exec_stream_request_update::Update, request::Request as IdmRequestType,
response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
ExecStreamRequestStdin, ExecStreamRequestTerminalSize, ExecStreamRequestUpdate,
Request as IdmRequest,
},
v1::control::{ExecInsideZoneReply, ExecInsideZoneRequest},
};
use crate::control::ApiError;
use crate::idm::DaemonIdmHandle;
/// Handles the ExecInsideZone streaming RPC: runs a command inside a zone over
/// IDM and relays stdin/output between the client and the zone.
pub struct ExecInsideZoneRpc {
    idm: DaemonIdmHandle,
}

impl ExecInsideZoneRpc {
    pub fn new(idm: DaemonIdmHandle) -> Self {
        Self { idm }
    }

    /// Starts the exec described by the first client message, then forwards
    /// subsequent stdin/terminal-resize updates to the zone while streaming
    /// process output and exit information back to the client.
    pub async fn process(
        self,
        mut input: Streaming<ExecInsideZoneRequest>,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<ExecInsideZoneReply, Status>> + Send + 'static>>>
    {
        // The first message must carry the task definition and zone id.
        let Some(request) = input.next().await else {
            return Err(anyhow!("expected to have at least one request"));
        };
        let request = request?;
        let Some(task) = request.task else {
            return Err(anyhow!("task is missing"));
        };
        let uuid = Uuid::from_str(&request.zone_id)?;
        let idm = self.idm.client(uuid).await?;
        // Build the exec-start message from the task definition.
        let idm_request = Request {
            request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                update: Some(Update::Start(ExecStreamRequestStart {
                    environment: task
                        .environment
                        .into_iter()
                        .map(|x| ExecEnvVar {
                            key: x.key,
                            value: x.value,
                        })
                        .collect(),
                    command: task.command,
                    working_directory: task.working_directory,
                    tty: task.tty,
                    terminal_size: request.terminal_size.map(|size| {
                        ExecStreamRequestTerminalSize {
                            rows: size.rows,
                            columns: size.columns,
                        }
                    }),
                })),
            })),
        };
        let output = try_stream! {
            let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
                message: x.to_string(),
            })?;
            loop {
                // Race further client input against responses from the zone.
                select! {
                    x = input.next() => if let Some(update) = x {
                        let update: Result<ExecInsideZoneRequest, Status> = update.map_err(|error| ApiError {
                            message: error.to_string()
                        }.into());
                        if let Ok(update) = update {
                            // Forward stdin bytes and the stdin-closed flag;
                            // send failures are deliberately ignored (let _).
                            if !update.stdin.is_empty() {
                                let _ = handle.update(IdmRequest {
                                    request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                                        update: Some(Update::Stdin(ExecStreamRequestStdin {
                                            data: update.stdin,
                                            closed: update.stdin_closed,
                                        })),
                                    }))}).await;
                            }
                            // Forward terminal resizes; failures likewise ignored.
                            if let Some(ref terminal_size) = update.terminal_size {
                                let _ = handle.update(IdmRequest {
                                    request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                                        update: Some(Update::TerminalResize(ExecStreamRequestTerminalSize {
                                            rows: terminal_size.rows,
                                            columns: terminal_size.columns,
                                        })),
                                    }))}).await;
                            }
                        }
                    },
                    x = handle.receiver.recv() => match x {
                        Some(response) => {
                            // Any non-exec-stream response ends the exchange.
                            let Some(IdmResponseType::ExecStream(update)) = response.response else {
                                break;
                            };
                            let reply = ExecInsideZoneReply {
                                exited: update.exited,
                                error: update.error,
                                exit_code: update.exit_code,
                                stdout: update.stdout,
                                stderr: update.stderr,
                            };
                            yield reply;
                        },
                        None => {
                            break;
                        }
                    }
                }
            }
        };
        Ok(Box::pin(output))
    }
}

View File

@ -0,0 +1,33 @@
use anyhow::Result;
use krata::v1::control::{GetHostCpuTopologyReply, GetHostCpuTopologyRequest, HostCpuTopologyInfo};
use kratart::Runtime;
/// Handles the GetHostCpuTopology RPC: reports the host's physical CPU layout.
pub struct GetHostCpuTopologyRpc {
    runtime: Runtime,
}

impl GetHostCpuTopologyRpc {
    pub fn new(runtime: Runtime) -> Self {
        Self { runtime }
    }

    /// Queries the power-management context for the CPU topology and converts
    /// each entry into its API representation.
    pub async fn process(
        self,
        _request: GetHostCpuTopologyRequest,
    ) -> Result<GetHostCpuTopologyReply> {
        let power = self.runtime.power_management_context().await?;
        let cpus = power
            .cpu_topology()
            .await?
            .into_iter()
            .map(|cpu| HostCpuTopologyInfo {
                core: cpu.core,
                socket: cpu.socket,
                node: cpu.node,
                thread: cpu.thread,
                class: cpu.class as i32,
            })
            .collect();
        Ok(GetHostCpuTopologyReply { cpus })
    }
}

View File

@ -0,0 +1,37 @@
use crate::command::DaemonCommand;
use crate::network::assignment::NetworkAssignment;
use crate::zlt::ZoneLookupTable;
use anyhow::Result;
use krata::v1::control::{GetHostStatusReply, GetHostStatusRequest};
/// Handles the GetHostStatus RPC: reports the host's identity, version, and
/// network reservation details.
pub struct GetHostStatusRpc {
    network: NetworkAssignment,
    zlt: ZoneLookupTable,
}

impl GetHostStatusRpc {
    // Parameter renamed from `ip` to `network` to match the field it populates.
    pub fn new(network: NetworkAssignment, zlt: ZoneLookupTable) -> Self {
        Self { network, zlt }
    }

    /// Builds the host status reply from the lookup table and the host's
    /// network reservation; address fields are empty when no reservation exists.
    pub async fn process(self, _request: GetHostStatusRequest) -> Result<GetHostStatusReply> {
        let host_reservation = self.network.retrieve(self.zlt.host_uuid()).await?;
        Ok(GetHostStatusReply {
            host_domid: self.zlt.host_domid(),
            host_uuid: self.zlt.host_uuid().to_string(),
            krata_version: DaemonCommand::version(),
            host_ipv4: host_reservation
                .as_ref()
                .map(|x| format!("{}/{}", x.ipv4, x.ipv4_prefix))
                .unwrap_or_default(),
            host_ipv6: host_reservation
                .as_ref()
                .map(|x| format!("{}/{}", x.ipv6, x.ipv6_prefix))
                .unwrap_or_default(),
            // Normalize the MAC rendering to lowercase colon-separated form.
            host_mac: host_reservation
                .as_ref()
                .map(|x| x.mac.to_string().to_lowercase().replace('-', ":"))
                .unwrap_or_default(),
        })
    }
}

View File

@ -0,0 +1,24 @@
use std::str::FromStr;
use anyhow::Result;
use uuid::Uuid;
use krata::v1::control::{GetZoneReply, GetZoneRequest};
use crate::db::zone::ZoneStore;
/// Handles the GetZone RPC: looks up a single zone by its UUID.
pub struct GetZoneRpc {
    zones: ZoneStore,
}

impl GetZoneRpc {
    pub fn new(zones: ZoneStore) -> Self {
        Self { zones }
    }

    /// Returns the zone with the requested id, or `None` when absent.
    ///
    /// Reads the single record directly via `ZoneStore::read` instead of
    /// listing every zone and discarding all but one.
    pub async fn process(self, request: GetZoneRequest) -> Result<GetZoneReply> {
        let uuid = Uuid::from_str(&request.zone_id)?;
        let zone = self.zones.read(uuid).await?;
        Ok(GetZoneReply { zone })
    }
}

View File

@ -0,0 +1,28 @@
use anyhow::Result;
use krata::v1::control::{DeviceInfo, ListDevicesReply, ListDevicesRequest};
use crate::devices::DaemonDeviceManager;
/// Handles the ListDevices RPC: reports each known device and its claim state.
pub struct ListDevicesRpc {
    devices: DaemonDeviceManager,
}

impl ListDevicesRpc {
    pub fn new(devices: DaemonDeviceManager) -> Self {
        Self { devices }
    }

    /// Snapshots the device manager state and converts it into `DeviceInfo` entries.
    pub async fn process(self, _request: ListDevicesRequest) -> Result<ListDevicesReply> {
        let snapshot = self.devices.copy().await?;
        let devices = snapshot
            .into_iter()
            .map(|(name, state)| DeviceInfo {
                claimed: state.owner.is_some(),
                owner: state.owner.map(|owner| owner.to_string()).unwrap_or_default(),
                name,
            })
            .collect();
        Ok(ListDevicesReply { devices })
    }
}

View File

@ -0,0 +1,28 @@
use anyhow::Result;
use krata::v1::{
common::NetworkReservation,
control::{ListNetworkReservationsReply, ListNetworkReservationsRequest},
};
use crate::network::assignment::NetworkAssignment;
/// Handles the ListNetworkReservations RPC: dumps every known network reservation.
pub struct ListNetworkReservationsRpc {
    network: NetworkAssignment,
}

impl ListNetworkReservationsRpc {
    pub fn new(network: NetworkAssignment) -> Self {
        Self { network }
    }

    /// Reads all reservations from the assignment store and converts each to
    /// its API representation.
    pub async fn process(
        self,
        _request: ListNetworkReservationsRequest,
    ) -> Result<ListNetworkReservationsReply> {
        let reservations = self
            .network
            .read_reservations()
            .await?
            .into_values()
            .map(Into::into)
            .collect::<Vec<NetworkReservation>>();
        Ok(ListNetworkReservationsReply { reservations })
    }
}

View File

@ -0,0 +1,21 @@
use anyhow::Result;
use krata::v1::common::Zone;
use krata::v1::control::{ListZonesReply, ListZonesRequest};
use crate::db::zone::ZoneStore;
/// Handles the ListZones RPC: returns every zone record in the store.
pub struct ListZonesRpc {
    zones: ZoneStore,
}

impl ListZonesRpc {
    pub fn new(zones: ZoneStore) -> Self {
        Self { zones }
    }

    /// Lists all zones, keeping only the record values (keys are the UUIDs).
    pub async fn process(self, _request: ListZonesRequest) -> Result<ListZonesReply> {
        let zones: Vec<Zone> = self.zones.list().await?.into_values().collect();
        Ok(ListZonesReply { zones })
    }
}

View File

@ -0,0 +1,365 @@
use std::pin::Pin;
use anyhow::Error;
use futures::Stream;
use list_network_reservations::ListNetworkReservationsRpc;
use tokio::sync::mpsc::Sender;
use tonic::{Request, Response, Status, Streaming};
use uuid::Uuid;
use krata::v1::control::{
control_service_server::ControlService, CreateZoneReply, CreateZoneRequest, DestroyZoneReply,
DestroyZoneRequest, ExecInsideZoneReply, ExecInsideZoneRequest, GetHostCpuTopologyReply,
GetHostCpuTopologyRequest, GetHostStatusReply, GetHostStatusRequest, ListDevicesReply,
ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
ReadHypervisorConsoleReply, ReadHypervisorConsoleRequest, ReadZoneMetricsReply,
ReadZoneMetricsRequest, ResolveZoneIdReply, ResolveZoneIdRequest, SnoopIdmReply,
SnoopIdmRequest, UpdateZoneResourcesReply, UpdateZoneResourcesRequest, WatchEventsReply,
WatchEventsRequest, ZoneConsoleReply, ZoneConsoleRequest,
};
use krata::v1::control::{
GetZoneReply, GetZoneRequest, ListNetworkReservationsReply, ListNetworkReservationsRequest,
SetHostPowerManagementPolicyReply, SetHostPowerManagementPolicyRequest,
};
use krataoci::packer::service::OciPackerService;
use kratart::Runtime;
use crate::control::attach_zone_console::AttachZoneConsoleRpc;
use crate::control::create_zone::CreateZoneRpc;
use crate::control::destroy_zone::DestroyZoneRpc;
use crate::control::exec_inside_zone::ExecInsideZoneRpc;
use crate::control::get_host_cpu_topology::GetHostCpuTopologyRpc;
use crate::control::get_host_status::GetHostStatusRpc;
use crate::control::get_zone::GetZoneRpc;
use crate::control::list_devices::ListDevicesRpc;
use crate::control::list_zones::ListZonesRpc;
use crate::control::pull_image::PullImageRpc;
use crate::control::read_hypervisor_console::ReadHypervisorConsoleRpc;
use crate::control::read_zone_metrics::ReadZoneMetricsRpc;
use crate::control::resolve_zone_id::ResolveZoneIdRpc;
use crate::control::set_host_power_management_policy::SetHostPowerManagementPolicyRpc;
use crate::control::snoop_idm::SnoopIdmRpc;
use crate::control::update_zone_resources::UpdateZoneResourcesRpc;
use crate::control::watch_events::WatchEventsRpc;
use crate::db::zone::ZoneStore;
use crate::network::assignment::NetworkAssignment;
use crate::{
console::DaemonConsoleHandle, devices::DaemonDeviceManager, event::DaemonEventContext,
idm::DaemonIdmHandle, zlt::ZoneLookupTable,
};
pub mod attach_zone_console;
pub mod create_zone;
pub mod destroy_zone;
pub mod exec_inside_zone;
pub mod get_host_cpu_topology;
pub mod get_host_status;
pub mod get_zone;
pub mod list_devices;
pub mod list_network_reservations;
pub mod list_zones;
pub mod pull_image;
pub mod read_hypervisor_console;
pub mod read_zone_metrics;
pub mod resolve_zone_id;
pub mod set_host_power_management_policy;
pub mod snoop_idm;
pub mod update_zone_resources;
pub mod watch_events;
/// Lightweight error wrapper used by RPC handlers to surface failures as
/// gRPC statuses.
pub struct ApiError {
    // Human-readable error text forwarded to the client.
    message: String,
}

/// Allows `?` on `anyhow::Result` values in contexts that expect `ApiError`.
impl From<Error> for ApiError {
    fn from(value: Error) -> Self {
        ApiError {
            message: value.to_string(),
        }
    }
}

/// Converts an `ApiError` into a tonic `Status` with code `Unknown`.
impl From<ApiError> for Status {
    fn from(value: ApiError) -> Self {
        Status::unknown(value.message)
    }
}
/// gRPC implementation of the krata control service, aggregating the daemon
/// subsystem handles that the individual RPC handler types need.
#[derive(Clone)]
pub struct DaemonControlService {
    // Resolves domids to zone UUIDs and provides host identity.
    zlt: ZoneLookupTable,
    // Host device claim/ownership tracking.
    devices: DaemonDeviceManager,
    // Broadcast bus for daemon events.
    events: DaemonEventContext,
    // Access to zone consoles.
    console: DaemonConsoleHandle,
    // Inter-domain messaging (IDM) client and snoop handle.
    idm: DaemonIdmHandle,
    // Persistent zone records.
    zones: ZoneStore,
    // Network reservation store.
    network: NetworkAssignment,
    // Wakes the zone reconciler for a given zone UUID.
    zone_reconciler_notify: Sender<Uuid>,
    // OCI image pull/pack service.
    packer: OciPackerService,
    // Hypervisor runtime operations.
    runtime: Runtime,
}

impl DaemonControlService {
    /// Builds the service from its subsystem handles.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        zlt: ZoneLookupTable,
        devices: DaemonDeviceManager,
        events: DaemonEventContext,
        console: DaemonConsoleHandle,
        idm: DaemonIdmHandle,
        zones: ZoneStore,
        network: NetworkAssignment,
        zone_reconciler_notify: Sender<Uuid>,
        packer: OciPackerService,
        runtime: Runtime,
    ) -> Self {
        Self {
            zlt,
            devices,
            events,
            console,
            idm,
            zones,
            network,
            zone_reconciler_notify,
            packer,
            runtime,
        }
    }
}
/// Wires each control-service RPC to its dedicated handler type; every method
/// is a thin adapter that unwraps the tonic request, delegates to the handler's
/// `process`, and converts the `anyhow::Result` via `adapt`.
#[tonic::async_trait]
impl ControlService for DaemonControlService {
    async fn get_host_status(
        &self,
        request: Request<GetHostStatusRequest>,
    ) -> Result<Response<GetHostStatusReply>, Status> {
        let request = request.into_inner();
        adapt(
            GetHostStatusRpc::new(self.network.clone(), self.zlt.clone())
                .process(request)
                .await,
        )
    }

    // Server-streaming response type for snoop_idm.
    type SnoopIdmStream =
        Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;

    async fn snoop_idm(
        &self,
        request: Request<SnoopIdmRequest>,
    ) -> Result<Response<Self::SnoopIdmStream>, Status> {
        let request = request.into_inner();
        adapt(
            SnoopIdmRpc::new(self.idm.clone(), self.zlt.clone())
                .process(request)
                .await,
        )
    }

    async fn get_host_cpu_topology(
        &self,
        request: Request<GetHostCpuTopologyRequest>,
    ) -> Result<Response<GetHostCpuTopologyReply>, Status> {
        let request = request.into_inner();
        adapt(
            GetHostCpuTopologyRpc::new(self.runtime.clone())
                .process(request)
                .await,
        )
    }

    async fn set_host_power_management_policy(
        &self,
        request: Request<SetHostPowerManagementPolicyRequest>,
    ) -> Result<Response<SetHostPowerManagementPolicyReply>, Status> {
        let request = request.into_inner();
        adapt(
            SetHostPowerManagementPolicyRpc::new(self.runtime.clone())
                .process(request)
                .await,
        )
    }

    async fn list_devices(
        &self,
        request: Request<ListDevicesRequest>,
    ) -> Result<Response<ListDevicesReply>, Status> {
        let request = request.into_inner();
        adapt(
            ListDevicesRpc::new(self.devices.clone())
                .process(request)
                .await,
        )
    }

    async fn list_network_reservations(
        &self,
        request: Request<ListNetworkReservationsRequest>,
    ) -> Result<Response<ListNetworkReservationsReply>, Status> {
        let request = request.into_inner();
        adapt(
            ListNetworkReservationsRpc::new(self.network.clone())
                .process(request)
                .await,
        )
    }

    // Server-streaming response type for pull_image (progress + final reply).
    type PullImageStream =
        Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;

    async fn pull_image(
        &self,
        request: Request<PullImageRequest>,
    ) -> Result<Response<Self::PullImageStream>, Status> {
        let request = request.into_inner();
        adapt(
            PullImageRpc::new(self.packer.clone())
                .process(request)
                .await,
        )
    }

    async fn create_zone(
        &self,
        request: Request<CreateZoneRequest>,
    ) -> Result<Response<CreateZoneReply>, Status> {
        let request = request.into_inner();
        adapt(
            CreateZoneRpc::new(
                self.zones.clone(),
                self.zlt.clone(),
                self.zone_reconciler_notify.clone(),
            )
            .process(request)
            .await,
        )
    }

    async fn destroy_zone(
        &self,
        request: Request<DestroyZoneRequest>,
    ) -> Result<Response<DestroyZoneReply>, Status> {
        let request = request.into_inner();
        adapt(
            DestroyZoneRpc::new(self.zones.clone(), self.zone_reconciler_notify.clone())
                .process(request)
                .await,
        )
    }

    async fn resolve_zone_id(
        &self,
        request: Request<ResolveZoneIdRequest>,
    ) -> Result<Response<ResolveZoneIdReply>, Status> {
        let request = request.into_inner();
        adapt(
            ResolveZoneIdRpc::new(self.zones.clone())
                .process(request)
                .await,
        )
    }

    async fn get_zone(
        &self,
        request: Request<GetZoneRequest>,
    ) -> Result<Response<GetZoneReply>, Status> {
        let request = request.into_inner();
        adapt(GetZoneRpc::new(self.zones.clone()).process(request).await)
    }

    async fn update_zone_resources(
        &self,
        request: Request<UpdateZoneResourcesRequest>,
    ) -> Result<Response<UpdateZoneResourcesReply>, Status> {
        let request = request.into_inner();
        adapt(
            UpdateZoneResourcesRpc::new(self.runtime.clone(), self.zones.clone())
                .process(request)
                .await,
        )
    }

    async fn list_zones(
        &self,
        request: Request<ListZonesRequest>,
    ) -> Result<Response<ListZonesReply>, Status> {
        let request = request.into_inner();
        adapt(ListZonesRpc::new(self.zones.clone()).process(request).await)
    }

    // Server-streaming response type for attach_zone_console.
    type AttachZoneConsoleStream =
        Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;

    async fn attach_zone_console(
        &self,
        request: Request<Streaming<ZoneConsoleRequest>>,
    ) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
        let input = request.into_inner();
        adapt(
            AttachZoneConsoleRpc::new(self.console.clone())
                .process(input)
                .await,
        )
    }

    // Server-streaming response type for exec_inside_zone.
    type ExecInsideZoneStream =
        Pin<Box<dyn Stream<Item = Result<ExecInsideZoneReply, Status>> + Send + 'static>>;

    async fn exec_inside_zone(
        &self,
        request: Request<Streaming<ExecInsideZoneRequest>>,
    ) -> Result<Response<Self::ExecInsideZoneStream>, Status> {
        let input = request.into_inner();
        adapt(
            ExecInsideZoneRpc::new(self.idm.clone())
                .process(input)
                .await,
        )
    }

    async fn read_zone_metrics(
        &self,
        request: Request<ReadZoneMetricsRequest>,
    ) -> Result<Response<ReadZoneMetricsReply>, Status> {
        let request = request.into_inner();
        adapt(
            ReadZoneMetricsRpc::new(self.idm.clone())
                .process(request)
                .await,
        )
    }

    // Server-streaming response type for watch_events.
    type WatchEventsStream =
        Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>;

    async fn watch_events(
        &self,
        request: Request<WatchEventsRequest>,
    ) -> Result<Response<Self::WatchEventsStream>, Status> {
        let request = request.into_inner();
        adapt(
            WatchEventsRpc::new(self.events.clone())
                .process(request)
                .await,
        )
    }

    async fn read_hypervisor_console(
        &self,
        request: Request<ReadHypervisorConsoleRequest>,
    ) -> Result<Response<ReadHypervisorConsoleReply>, Status> {
        let request = request.into_inner();
        adapt(
            ReadHypervisorConsoleRpc::new(self.runtime.clone())
                .process(request)
                .await,
        )
    }
}
fn adapt<T>(result: anyhow::Result<T>) -> Result<Response<T>, Status> {
result
.map(Response::new)
.map_err(|error| Status::unknown(error.to_string()))
}

View File

@ -0,0 +1,100 @@
use crate::control::ApiError;
use crate::oci::convert_oci_progress;
use anyhow::Result;
use async_stream::try_stream;
use krata::v1::common::OciImageFormat;
use krata::v1::control::{PullImageReply, PullImageRequest};
use krataoci::name::ImageName;
use krataoci::packer::service::OciPackerService;
use krataoci::packer::{OciPackedFormat, OciPackedImage};
use krataoci::progress::{OciProgress, OciProgressContext};
use std::pin::Pin;
use tokio::select;
use tokio::task::JoinError;
use tokio_stream::Stream;
use tonic::Status;
/// Outcome of racing a progress update against completion of the packer task.
enum PullImageSelect {
    /// A progress snapshot; `None` means the progress channel closed.
    Progress(Option<OciProgress>),
    /// Packer task finished: outer `Result` is the join outcome, inner is the pack outcome.
    Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
}
/// Handles the PullImage streaming RPC: pulls and packs an OCI image, streaming
/// progress replies followed by one final reply with the digest and format.
pub struct PullImageRpc {
    packer: OciPackerService,
}

impl PullImageRpc {
    pub fn new(packer: OciPackerService) -> Self {
        Self { packer }
    }

    /// Starts the pull/pack on a background task and yields progress replies
    /// until it completes, then yields the final digest/format reply.
    pub async fn process(
        self,
        request: PullImageRequest,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>> {
        let name = ImageName::parse(&request.image)?;
        // Unknown format defaults to squashfs — presumably the daemon's preferred format.
        let format = match request.format() {
            OciImageFormat::Unknown => OciPackedFormat::Squashfs,
            OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
            OciImageFormat::Erofs => OciPackedFormat::Erofs,
            OciImageFormat::Tar => OciPackedFormat::Tar,
        };
        // Watch channel over which the packer publishes progress updates.
        let (context, mut receiver) = OciProgressContext::create();
        let our_packer = self.packer;
        let output = try_stream! {
            // Run the pull/pack on its own task so progress can stream concurrently.
            let mut task = tokio::task::spawn(async move {
                our_packer.request(name, format, request.overwrite_cache, request.update, context).await
            });
            let abort_handle = task.abort_handle();
            // Abort the packer task if the client drops the stream early.
            let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
                handle.abort();
            });
            loop {
                // Race a progress update against task completion.
                let what = select! {
                    x = receiver.changed() => match x {
                        Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
                        Err(_) => PullImageSelect::Progress(None),
                    },
                    x = &mut task => PullImageSelect::Completed(x),
                };
                match what {
                    PullImageSelect::Progress(Some(progress)) => {
                        // Intermediate replies carry progress only; digest/format are placeholders.
                        let reply = PullImageReply {
                            progress: Some(convert_oci_progress(progress)),
                            digest: String::new(),
                            format: OciImageFormat::Unknown.into(),
                        };
                        yield reply;
                    },
                    PullImageSelect::Completed(result) => {
                        // First ? handles join errors, second handles packer errors.
                        let result = result.map_err(|err| ApiError {
                            message: err.to_string(),
                        })?;
                        let packed = result.map_err(|err| ApiError {
                            message: err.to_string(),
                        })?;
                        let reply = PullImageReply {
                            progress: None,
                            digest: packed.digest,
                            format: match packed.format {
                                OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
                                OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
                                OciPackedFormat::Tar => OciImageFormat::Tar.into(),
                            },
                        };
                        yield reply;
                        break;
                    },
                    // Progress(None): watch channel closed; keep waiting for the task.
                    _ => {
                        continue;
                    }
                }
            }
        };
        Ok(Box::pin(output))
    }
}

View File

@ -0,0 +1,23 @@
use anyhow::Result;
use krata::v1::control::{ReadHypervisorConsoleReply, ReadHypervisorConsoleRequest};
use kratart::Runtime;
/// Handles the ReadHypervisorConsole RPC: returns the current hypervisor
/// console contents.
pub struct ReadHypervisorConsoleRpc {
    runtime: Runtime,
}

impl ReadHypervisorConsoleRpc {
    pub fn new(runtime: Runtime) -> Self {
        Self { runtime }
    }

    /// Reads the hypervisor console once (non-following mode) and returns it.
    pub async fn process(
        self,
        _: ReadHypervisorConsoleRequest,
    ) -> Result<ReadHypervisorConsoleReply> {
        // `false` selects a one-shot read rather than following the console.
        let console_data = self.runtime.read_hypervisor_console(false).await?;
        Ok(ReadHypervisorConsoleReply {
            data: console_data.to_string(),
        })
    }
}

View File

@ -0,0 +1,40 @@
use std::str::FromStr;
use anyhow::Result;
use uuid::Uuid;
use krata::idm::internal::MetricsRequest;
use krata::idm::internal::{
request::Request as IdmRequestType, response::Response as IdmResponseType,
Request as IdmRequest,
};
use krata::v1::control::{ReadZoneMetricsReply, ReadZoneMetricsRequest};
use crate::idm::DaemonIdmHandle;
use crate::metrics::idm_metric_to_api;
pub struct ReadZoneMetricsRpc {
idm: DaemonIdmHandle,
}
impl ReadZoneMetricsRpc {
pub fn new(idm: DaemonIdmHandle) -> Self {
Self { idm }
}
pub async fn process(self, request: ReadZoneMetricsRequest) -> Result<ReadZoneMetricsReply> {
let uuid = Uuid::from_str(&request.zone_id)?;
let client = self.idm.client(uuid).await?;
let response = client
.send(IdmRequest {
request: Some(IdmRequestType::Metrics(MetricsRequest {})),
})
.await?;
let mut reply = ReadZoneMetricsReply::default();
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
reply.root = metrics.root.map(idm_metric_to_api);
}
Ok(reply)
}
}

View File

@ -0,0 +1,30 @@
use anyhow::Result;
use krata::v1::common::Zone;
use krata::v1::control::{ResolveZoneIdReply, ResolveZoneIdRequest};
use crate::db::zone::ZoneStore;
/// Handles the ResolveZoneId RPC: maps a zone name (or raw id) to a zone id.
pub struct ResolveZoneIdRpc {
    zones: ZoneStore,
}

impl ResolveZoneIdRpc {
    pub fn new(zones: ZoneStore) -> Self {
        Self { zones }
    }

    /// Resolves `request.name` against zone spec names and raw zone ids,
    /// returning the first matching zone's id or an empty string.
    ///
    /// Uses `find` so the scan stops at the first hit instead of cloning
    /// every spec and collecting all matches only to take the first.
    pub async fn process(self, request: ResolveZoneIdRequest) -> Result<ResolveZoneIdReply> {
        let zones = self.zones.list().await?;
        let zone_id = zones
            .into_values()
            .find(|zone| {
                // Match by spec name (only when a non-empty name was given),
                // or by exact zone id.
                let name_matches = !request.name.is_empty()
                    && zone
                        .spec
                        .as_ref()
                        .map(|spec| spec.name == request.name)
                        .unwrap_or(false);
                name_matches || zone.id == request.name
            })
            .map(|zone| zone.id)
            .unwrap_or_default();
        Ok(ResolveZoneIdReply { zone_id })
    }
}

View File

@ -0,0 +1,25 @@
use anyhow::Result;
use krata::v1::control::{SetHostPowerManagementPolicyReply, SetHostPowerManagementPolicyRequest};
use kratart::Runtime;
/// Handles the SetHostPowerManagementPolicy RPC: applies SMT and scheduler policy.
pub struct SetHostPowerManagementPolicyRpc {
    runtime: Runtime,
}

impl SetHostPowerManagementPolicyRpc {
    pub fn new(runtime: Runtime) -> Self {
        Self { runtime }
    }

    /// Applies the SMT-awareness flag first, then the scheduler policy string.
    pub async fn process(
        self,
        request: SetHostPowerManagementPolicyRequest,
    ) -> Result<SetHostPowerManagementPolicyReply> {
        let power = self.runtime.power_management_context().await?;
        power.set_smt_policy(request.smt_awareness).await?;
        power.set_scheduler_policy(&request.scheduler).await?;
        Ok(SetHostPowerManagementPolicyReply {})
    }
}

View File

@ -0,0 +1,39 @@
use crate::idm::DaemonIdmHandle;
use crate::zlt::ZoneLookupTable;
use anyhow::Result;
use async_stream::try_stream;
use krata::v1::control::{SnoopIdmReply, SnoopIdmRequest};
use std::pin::Pin;
use tokio_stream::Stream;
use tonic::Status;
/// Handles the SnoopIdm streaming RPC: mirrors IDM traffic labeled with zone UUIDs.
pub struct SnoopIdmRpc {
    idm: DaemonIdmHandle,
    zlt: ZoneLookupTable,
}

impl SnoopIdmRpc {
    pub fn new(idm: DaemonIdmHandle, zlt: ZoneLookupTable) -> Self {
        Self { idm, zlt }
    }

    /// Subscribes to the IDM snoop feed and yields each packet whose endpoints
    /// resolve to zone UUIDs; unresolvable packets are silently skipped.
    pub async fn process(
        self,
        _request: SnoopIdmRequest,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>> {
        let mut subscription = self.idm.snoop();
        let table = self.zlt.clone();
        let stream = try_stream! {
            while let Ok(message) = subscription.recv().await {
                // Drop packets whose source domain is unknown to the lookup table.
                let Some(source) = table.lookup_uuid_by_domid(message.from).await else {
                    continue;
                };
                // Likewise for the destination domain.
                let Some(destination) = table.lookup_uuid_by_domid(message.to).await else {
                    continue;
                };
                yield SnoopIdmReply {
                    from: source.to_string(),
                    to: destination.to_string(),
                    packet: Some(message.packet),
                };
            }
        };
        Ok(Box::pin(stream))
    }
}

View File

@ -0,0 +1,82 @@
use std::str::FromStr;
use anyhow::{anyhow, Result};
use uuid::Uuid;
use krata::v1::common::{ZoneResourceStatus, ZoneState};
use krata::v1::control::{UpdateZoneResourcesReply, UpdateZoneResourcesRequest};
use kratart::Runtime;
use crate::db::zone::ZoneStore;
/// Handles the UpdateZoneResources RPC: applies new CPU/memory limits to a
/// running zone and records them in the zone store.
pub struct UpdateZoneResourcesRpc {
    runtime: Runtime,
    zones: ZoneStore,
}

impl UpdateZoneResourcesRpc {
    pub fn new(runtime: Runtime, zones: ZoneStore) -> Self {
        Self { runtime, zones }
    }

    /// Validates, clamps, and applies the requested resources, then persists
    /// the resulting resource status back into the zone store.
    ///
    /// Errors when the zone is missing, lacks a status, is not in the
    /// `Created` state, or has an invalid domid.
    pub async fn process(
        self,
        request: UpdateZoneResourcesRequest,
    ) -> Result<UpdateZoneResourcesReply> {
        let uuid = Uuid::from_str(&request.zone_id)?;
        let Some(mut zone) = self.zones.read(uuid).await? else {
            return Err(anyhow!("zone not found"));
        };
        let Some(ref mut status) = zone.status else {
            return Err(anyhow!("zone state not available"));
        };
        // Resources can only be changed on a fully created zone.
        if status.state() != ZoneState::Created {
            return Err(anyhow!("zone is in an invalid state"));
        }
        // domid 0 and u32::MAX are rejected — u32::MAX is the "unassigned"
        // sentinel used at creation time.
        if status.domid == 0 || status.domid == u32::MAX {
            return Err(anyhow!("zone domid is invalid"));
        }
        let mut resources = request.resources.unwrap_or_default();
        // Clamp: target memory may not exceed max memory; grow max to match.
        if resources.target_memory > resources.max_memory {
            resources.max_memory = resources.target_memory;
        }
        // At least one vCPU must remain online.
        if resources.target_cpus < 1 {
            resources.target_cpus = 1;
        }
        // Max vCPUs are fixed by the spec's initial resources and cannot be
        // raised here; clamp the target down to that ceiling.
        let initial_resources = zone
            .spec
            .clone()
            .unwrap_or_default()
            .initial_resources
            .unwrap_or_default();
        if resources.target_cpus > initial_resources.max_cpus {
            resources.target_cpus = initial_resources.max_cpus;
        }
        resources.max_cpus = initial_resources.max_cpus;
        // Memory values are scaled by 1024*1024 here — presumably MiB at the
        // API level and bytes at the runtime level; confirm against the runtime.
        self.runtime
            .set_memory_resources(
                status.domid,
                resources.target_memory * 1024 * 1024,
                resources.max_memory * 1024 * 1024,
            )
            .await
            .map_err(|error| anyhow!("failed to set memory resources: {}", error))?;
        self.runtime
            .set_cpu_resources(status.domid, resources.target_cpus)
            .await
            .map_err(|error| anyhow!("failed to set cpu resources: {}", error))?;
        // Record the applied resources so clients can observe the new limits.
        status.resource_status = Some(ZoneResourceStatus {
            active_resources: Some(resources),
        });
        self.zones.update(uuid, zone).await?;
        Ok(UpdateZoneResourcesReply {})
    }
}

View File

@ -0,0 +1,31 @@
use crate::event::DaemonEventContext;
use anyhow::Result;
use async_stream::try_stream;
use krata::v1::control::{WatchEventsReply, WatchEventsRequest};
use std::pin::Pin;
use tokio_stream::Stream;
use tonic::Status;
/// Serves the watch-events RPC: forwards every daemon event to the caller
/// as a stream of replies.
pub struct WatchEventsRpc {
    events: DaemonEventContext,
}

impl WatchEventsRpc {
    pub fn new(events: DaemonEventContext) -> Self {
        Self { events }
    }

    pub async fn process(
        self,
        _request: WatchEventsRequest,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>>
    {
        let mut subscription = self.events.subscribe();
        let stream = try_stream! {
            // recv() errors once the daemon side closes; that ends the stream.
            while let Ok(event) = subscription.recv().await {
                yield WatchEventsReply {
                    event: Some(event),
                };
            }
        };
        Ok(Box::pin(stream))
    }
}

View File

@ -0,0 +1,21 @@
use anyhow::Result;
use redb::Database;
use std::path::Path;
use std::sync::Arc;
pub mod network;
pub mod zone;
/// Shared handle to the daemon's redb database. Cloning is cheap: every
/// clone refers to the same underlying `Database` through the `Arc`.
#[derive(Clone)]
pub struct KrataDatabase {
    pub database: Arc<Database>,
}

impl KrataDatabase {
    /// Open the database at `path`, creating the file if it does not exist.
    ///
    /// Generalized to accept anything path-like (`&Path`, `&str`,
    /// `PathBuf`, ...); existing `&Path` callers are unaffected.
    pub fn open(path: impl AsRef<Path>) -> Result<Self> {
        let database = Database::create(path.as_ref())?;
        Ok(KrataDatabase {
            database: Arc::new(database),
        })
    }
}

View File

@ -0,0 +1,134 @@
use crate::db::KrataDatabase;
use advmac::MacAddr6;
use anyhow::Result;
use krata::v1::common::NetworkReservation as ApiNetworkReservation;
use log::error;
use redb::{ReadableTable, TableDefinition};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::net::{Ipv4Addr, Ipv6Addr};
use uuid::Uuid;
const NETWORK_RESERVATION_TABLE: TableDefinition<u128, &[u8]> =
TableDefinition::new("network-reservation");
#[derive(Clone)]
pub struct NetworkReservationStore {
db: KrataDatabase,
}
impl NetworkReservationStore {
pub fn open(db: KrataDatabase) -> Result<Self> {
let write = db.database.begin_write()?;
let _ = write.open_table(NETWORK_RESERVATION_TABLE);
write.commit()?;
Ok(NetworkReservationStore { db })
}
pub async fn read(&self, id: Uuid) -> Result<Option<NetworkReservation>> {
let read = self.db.database.begin_read()?;
let table = read.open_table(NETWORK_RESERVATION_TABLE)?;
let Some(entry) = table.get(id.to_u128_le())? else {
return Ok(None);
};
let bytes = entry.value();
Ok(Some(serde_json::from_slice(bytes)?))
}
pub async fn list(&self) -> Result<HashMap<Uuid, NetworkReservation>> {
enum ListEntry {
Valid(Uuid, NetworkReservation),
Invalid(Uuid),
}
let mut reservations: HashMap<Uuid, NetworkReservation> = HashMap::new();
let corruptions = {
let read = self.db.database.begin_read()?;
let table = read.open_table(NETWORK_RESERVATION_TABLE)?;
table
.iter()?
.flat_map(|result| {
result.map(|(key, value)| {
let uuid = Uuid::from_u128_le(key.value());
match serde_json::from_slice::<NetworkReservation>(value.value()) {
Ok(reservation) => ListEntry::Valid(uuid, reservation),
Err(error) => {
error!(
"found invalid network reservation in database for uuid {}: {}",
uuid, error
);
ListEntry::Invalid(uuid)
}
}
})
})
.filter_map(|entry| match entry {
ListEntry::Valid(uuid, reservation) => {
reservations.insert(uuid, reservation);
None
}
ListEntry::Invalid(uuid) => Some(uuid),
})
.collect::<Vec<Uuid>>()
};
if !corruptions.is_empty() {
let write = self.db.database.begin_write()?;
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
for corruption in corruptions {
table.remove(corruption.to_u128_le())?;
}
}
Ok(reservations)
}
pub async fn update(&self, id: Uuid, entry: NetworkReservation) -> Result<()> {
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
let bytes = serde_json::to_vec(&entry)?;
table.insert(id.to_u128_le(), bytes.as_slice())?;
}
write.commit()?;
Ok(())
}
pub async fn remove(&self, id: Uuid) -> Result<()> {
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
table.remove(id.to_u128_le())?;
}
write.commit()?;
Ok(())
}
}
/// A single persisted network reservation: the addresses and MAC assigned
/// to one zone, plus the gateway it should route through.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct NetworkReservation {
    // Owning zone UUID in string form; the nil UUID marks the gateway's
    // own reservation.
    pub uuid: String,
    pub ipv4: Ipv4Addr,
    pub ipv6: Ipv6Addr,
    pub mac: MacAddr6,
    // Prefix lengths of the networks the addresses were allocated from.
    pub ipv4_prefix: u8,
    pub ipv6_prefix: u8,
    pub gateway_ipv4: Ipv4Addr,
    pub gateway_ipv6: Ipv6Addr,
    pub gateway_mac: MacAddr6,
}
impl From<NetworkReservation> for ApiNetworkReservation {
fn from(val: NetworkReservation) -> Self {
ApiNetworkReservation {
uuid: val.uuid,
ipv4: format!("{}/{}", val.ipv4, val.ipv4_prefix),
ipv6: format!("{}/{}", val.ipv6, val.ipv6_prefix),
mac: val.mac.to_string().to_lowercase().replace('-', ":"),
gateway_ipv4: format!("{}/{}", val.gateway_ipv4, val.ipv4_prefix),
gateway_ipv6: format!("{}/{}", val.gateway_ipv6, val.ipv6_prefix),
gateway_mac: val.gateway_mac.to_string().to_lowercase().replace('-', ":"),
}
}
}

View File

@ -1,33 +1,31 @@
use std::{collections::HashMap, path::Path, sync::Arc};
use std::collections::HashMap;
use crate::db::KrataDatabase;
use anyhow::Result;
use krata::v1::common::Zone;
use log::error;
use prost::Message;
use redb::{Database, ReadableTable, TableDefinition};
use redb::{ReadableTable, TableDefinition};
use uuid::Uuid;
const ZONES: TableDefinition<u128, &[u8]> = TableDefinition::new("zones");
const ZONE_TABLE: TableDefinition<u128, &[u8]> = TableDefinition::new("zone");
#[derive(Clone)]
pub struct ZoneStore {
database: Arc<Database>,
db: KrataDatabase,
}
impl ZoneStore {
pub fn open(path: &Path) -> Result<Self> {
let database = Database::create(path)?;
let write = database.begin_write()?;
let _ = write.open_table(ZONES);
pub fn open(db: KrataDatabase) -> Result<Self> {
let write = db.database.begin_write()?;
let _ = write.open_table(ZONE_TABLE);
write.commit()?;
Ok(ZoneStore {
database: Arc::new(database),
})
Ok(ZoneStore { db })
}
pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
let read = self.database.begin_read()?;
let table = read.open_table(ZONES)?;
let read = self.db.database.begin_read()?;
let table = read.open_table(ZONE_TABLE)?;
let Some(entry) = table.get(id.to_u128_le())? else {
return Ok(None);
};
@ -37,8 +35,8 @@ impl ZoneStore {
pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
let mut zones: HashMap<Uuid, Zone> = HashMap::new();
let read = self.database.begin_read()?;
let table = read.open_table(ZONES)?;
let read = self.db.database.begin_read()?;
let table = read.open_table(ZONE_TABLE)?;
for result in table.iter()? {
let (key, value) = result?;
let uuid = Uuid::from_u128_le(key.value());
@ -58,9 +56,9 @@ impl ZoneStore {
}
pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
let write = self.database.begin_write()?;
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(ZONES)?;
let mut table = write.open_table(ZONE_TABLE)?;
let bytes = entry.encode_to_vec();
table.insert(id.to_u128_le(), bytes.as_slice())?;
}
@ -69,9 +67,9 @@ impl ZoneStore {
}
pub async fn remove(&self, id: Uuid) -> Result<()> {
let write = self.database.begin_write()?;
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(ZONES)?;
let mut table = write.open_table(ZONE_TABLE)?;
table.remove(id.to_u128_le())?;
}
write.commit()?;

View File

@ -4,9 +4,10 @@ use std::{
time::Duration,
};
use crate::{db::ZoneStore, idm::DaemonIdmHandle};
use crate::db::zone::ZoneStore;
use crate::idm::DaemonIdmHandle;
use anyhow::Result;
use krata::v1::common::ZoneExitInfo;
use krata::v1::common::ZoneExitStatus;
use krata::{
idm::{internal::event::Event as EventType, internal::Event},
v1::common::{ZoneState, ZoneStatus},
@ -83,15 +84,15 @@ impl DaemonEventGenerator {
return Ok(());
};
let Some(ref state) = zone.state else {
let Some(ref status) = zone.status else {
return Ok(());
};
let status = state.status();
let state = status.state();
let id = Uuid::from_str(&zone.id)?;
let domid = state.domid;
match status {
ZoneStatus::Started => {
let domid = status.domid;
match state {
ZoneState::Created => {
if let Entry::Vacant(e) = self.idms.entry(domid) {
let client = self.idm.client_by_domid(domid).await?;
let mut receiver = client.subscribe().await?;
@ -111,7 +112,7 @@ impl DaemonEventGenerator {
}
}
ZoneStatus::Destroyed => {
ZoneState::Destroyed => {
if let Some((_, handle)) = self.idms.remove(&domid) {
handle.abort();
}
@ -131,13 +132,14 @@ impl DaemonEventGenerator {
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
if let Some(mut zone) = self.zones.read(id).await? {
zone.state = Some(ZoneState {
status: ZoneStatus::Exited.into(),
network: zone.state.clone().unwrap_or_default().network,
exit_info: Some(ZoneExitInfo { code }),
error_info: None,
host: zone.state.clone().map(|x| x.host).unwrap_or_default(),
domid: zone.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
zone.status = Some(ZoneStatus {
state: ZoneState::Exited.into(),
network_status: zone.status.clone().unwrap_or_default().network_status,
exit_status: Some(ZoneExitStatus { code }),
error_status: None,
resource_status: zone.status.clone().unwrap_or_default().resource_status,
host: zone.status.clone().map(|x| x.host).unwrap_or_default(),
domid: zone.status.clone().map(|x| x.domid).unwrap_or(u32::MAX),
});
self.zones.update(id, zone).await?;

View File

@ -11,7 +11,7 @@ use krata::idm::{
transport::IdmTransportPacket,
};
use kratart::channel::ChannelService;
use log::{error, warn};
use log::{debug, error, warn};
use prost::Message;
use tokio::{
select,
@ -31,7 +31,7 @@ type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
#[derive(Clone)]
pub struct DaemonIdmHandle {
glt: ZoneLookupTable,
zlt: ZoneLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmTransportPacket)>,
@ -45,7 +45,7 @@ impl DaemonIdmHandle {
}
pub async fn client(&self, uuid: Uuid) -> Result<IdmInternalClient> {
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
let Some(domid) = self.zlt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
self.client_by_domid(domid).await
@ -72,7 +72,7 @@ pub struct DaemonIdmSnoopPacket {
}
pub struct DaemonIdm {
glt: ZoneLookupTable,
zlt: ZoneLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmTransportPacket)>,
@ -84,16 +84,21 @@ pub struct DaemonIdm {
}
impl DaemonIdm {
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonIdm> {
pub async fn new(zlt: ZoneLookupTable) -> Result<DaemonIdm> {
debug!("allocating channel service for idm");
let (service, tx_raw_sender, rx_receiver) =
ChannelService::new("krata-channel".to_string(), None).await?;
let (tx_sender, tx_receiver) = channel(100);
let (snoop_sender, _) = broadcast::channel(100);
debug!("starting idm channel service");
let task = service.launch().await?;
let clients = Arc::new(Mutex::new(HashMap::new()));
let feeds = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonIdm {
glt,
zlt,
rx_receiver,
tx_receiver,
tx_sender,
@ -106,7 +111,7 @@ impl DaemonIdm {
}
pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
let glt = self.glt.clone();
let zlt = self.zlt.clone();
let clients = self.clients.clone();
let feeds = self.feeds.clone();
let tx_sender = self.tx_sender.clone();
@ -119,7 +124,7 @@ impl DaemonIdm {
}
});
Ok(DaemonIdmHandle {
glt,
zlt,
clients,
feeds,
tx_sender,
@ -128,39 +133,55 @@ impl DaemonIdm {
})
}
async fn process(&mut self, buffers: &mut HashMap<u32, BytesMut>) -> Result<()> {
loop {
select! {
x = self.rx_receiver.recv() => match x {
Some((domid, data)) => {
async fn process_rx_packet(
&mut self,
domid: u32,
data: Option<Vec<u8>>,
buffers: &mut HashMap<u32, BytesMut>,
) -> Result<()> {
// check if data is present, if it is not, that signals a closed channel.
if let Some(data) = data {
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
buffer.extend_from_slice(&data);
loop {
// check if the buffer is less than the header size, if so, wait for more data
if buffer.len() < 6 {
break;
}
// check for the magic bytes 0xff, 0xff at the start of the message, if that doesn't
// exist, clear the buffer. this ensures that partial messages won't be processed.
if buffer[0] != 0xff || buffer[1] != 0xff {
buffer.clear();
break;
return Ok(());
}
let size = (buffer[2] as u32 | (buffer[3] as u32) << 8 | (buffer[4] as u32) << 16 | (buffer[5] as u32) << 24) as usize;
// read the size from the buffer as a little endian u32
let size = (buffer[2] as u32
| (buffer[3] as u32) << 8
| (buffer[4] as u32) << 16
| (buffer[5] as u32) << 24) as usize;
let needed = size + 6;
if buffer.len() < needed {
break;
return Ok(());
}
let mut packet = buffer.split_to(needed);
// advance the buffer by the header, leaving only the raw data.
packet.advance(6);
match IdmTransportPacket::decode(packet) {
Ok(packet) => {
let _ = client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await?;
let _ =
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds)
.await?;
let guard = self.feeds.lock().await;
if let Some(feed) = guard.get(&domid) {
let _ = feed.try_send(packet.clone());
}
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: domid, to: 0, packet });
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
from: domid,
to: 0,
packet,
});
}
Err(packet) => {
@ -174,6 +195,37 @@ impl DaemonIdm {
clients.remove(&domid);
feeds.remove(&domid);
}
Ok(())
}
async fn tx_packet(&mut self, domid: u32, packet: IdmTransportPacket) -> Result<()> {
let data = packet.encode_to_vec();
let mut buffer = vec![0u8; 6];
let length = data.len() as u32;
// magic bytes
buffer[0] = 0xff;
buffer[1] = 0xff;
// little endian u32 for message size
buffer[2] = length as u8;
buffer[3] = (length << 8) as u8;
buffer[4] = (length << 16) as u8;
buffer[5] = (length << 24) as u8;
buffer.extend_from_slice(&data);
self.tx_raw_sender.send((domid, buffer)).await?;
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
from: 0,
to: domid,
packet,
});
Ok(())
}
async fn process(&mut self, buffers: &mut HashMap<u32, BytesMut>) -> Result<()> {
loop {
select! {
x = self.rx_receiver.recv() => match x {
Some((domid, data)) => {
self.process_rx_packet(domid, data, buffers).await?;
},
None => {
@ -182,25 +234,14 @@ impl DaemonIdm {
},
x = self.tx_receiver.recv() => match x {
Some((domid, packet)) => {
let data = packet.encode_to_vec();
let mut buffer = vec![0u8; 6];
let length = data.len() as u32;
buffer[0] = 0xff;
buffer[1] = 0xff;
buffer[2] = length as u8;
buffer[3] = (length << 8) as u8;
buffer[4] = (length << 16) as u8;
buffer[5] = (length << 24) as u8;
buffer.extend_from_slice(&data);
self.tx_raw_sender.send((domid, buffer)).await?;
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: 0, to: domid, packet });
self.tx_packet(domid, packet).await?;
},
None => {
break;
}
}
};
}
}
Ok(())
}
@ -249,9 +290,9 @@ pub struct IdmDaemonBackend {
#[async_trait::async_trait]
impl IdmBackend for IdmDaemonBackend {
async fn recv(&mut self) -> Result<IdmTransportPacket> {
async fn recv(&mut self) -> Result<Vec<IdmTransportPacket>> {
if let Some(packet) = self.rx_receiver.recv().await {
Ok(packet)
Ok(vec![packet])
} else {
Err(anyhow!("idm receive channel closed"))
}

View File

@ -1,18 +1,23 @@
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
use crate::db::network::NetworkReservationStore;
use crate::db::zone::ZoneStore;
use crate::db::KrataDatabase;
use crate::network::assignment::NetworkAssignment;
use anyhow::{anyhow, Result};
use config::DaemonConfig;
use console::{DaemonConsole, DaemonConsoleHandle};
use control::DaemonControlService;
use db::ZoneStore;
use devices::DaemonDeviceManager;
use event::{DaemonEventContext, DaemonEventGenerator};
use idm::{DaemonIdm, DaemonIdmHandle};
use ipnetwork::{Ipv4Network, Ipv6Network};
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
use kratart::Runtime;
use log::info;
use log::{debug, info};
use reconcile::zone::ZoneReconciler;
use std::path::Path;
use std::time::Duration;
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
use tokio::{
fs,
net::UnixListener,
@ -33,6 +38,7 @@ pub mod devices;
pub mod event;
pub mod idm;
pub mod metrics;
pub mod network;
pub mod oci;
pub mod reconcile;
pub mod zlt;
@ -40,9 +46,10 @@ pub mod zlt;
pub struct Daemon {
store: String,
_config: Arc<DaemonConfig>,
glt: ZoneLookupTable,
zlt: ZoneLookupTable,
devices: DaemonDeviceManager,
zones: ZoneStore,
network: NetworkAssignment,
events: DaemonEventContext,
zone_reconciler_task: JoinHandle<()>,
zone_reconciler_notify: Sender<Uuid>,
@ -58,18 +65,22 @@ const ZONE_RECONCILER_QUEUE_LEN: usize = 1000;
impl Daemon {
pub async fn new(store: String) -> Result<Self> {
let store_dir = PathBuf::from(store.clone());
debug!("loading configuration");
let mut config_path = store_dir.clone();
config_path.push("config.toml");
let config = DaemonConfig::load(&config_path).await?;
let config = Arc::new(config);
debug!("initializing device manager");
let devices = DaemonDeviceManager::new(config.clone());
debug!("validating image cache directory");
let mut image_cache_dir = store_dir.clone();
image_cache_dir.push("cache");
image_cache_dir.push("image");
fs::create_dir_all(&image_cache_dir).await?;
debug!("loading zone0 uuid");
let mut host_uuid_path = store_dir.clone();
host_uuid_path.push("host.uuid");
let host_uuid = if host_uuid_path.is_file() {
@ -89,29 +100,46 @@ impl Daemon {
generated
};
debug!("validating zone asset directories");
let initrd_path = detect_zone_path(&store, "initrd")?;
let kernel_path = detect_zone_path(&store, "kernel")?;
let addons_path = detect_zone_path(&store, "addons.squashfs")?;
debug!("initializing caches and hydrating zone state");
let seed = config.oci.seed.clone().map(PathBuf::from);
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
let runtime = Runtime::new(host_uuid).await?;
let glt = ZoneLookupTable::new(0, host_uuid);
let zones_db_path = format!("{}/zones.db", store);
let zones = ZoneStore::open(&PathBuf::from(zones_db_path))?;
debug!("initializing core runtime");
let runtime = Runtime::new().await?;
let zlt = ZoneLookupTable::new(0, host_uuid);
let db_path = format!("{}/krata.db", store);
let database = KrataDatabase::open(Path::new(&db_path))?;
let zones = ZoneStore::open(database.clone())?;
let (zone_reconciler_notify, zone_reconciler_receiver) =
channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
let idm = DaemonIdm::new(glt.clone()).await?;
debug!("starting IDM service");
let idm = DaemonIdm::new(zlt.clone()).await?;
let idm = idm.launch().await?;
let console = DaemonConsole::new(glt.clone()).await?;
debug!("initializing console interfaces");
let console = DaemonConsole::new(zlt.clone()).await?;
let console = console.launch().await?;
let (events, generator) =
DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
.await?;
let runtime_for_reconciler = runtime.dupe().await?;
let ipv4_network = Ipv4Network::from_str(&config.network.ipv4.subnet)?;
let ipv6_network = Ipv6Network::from_str(&config.network.ipv6.subnet)?;
let network_reservation_store = NetworkReservationStore::open(database)?;
let network = NetworkAssignment::new(
host_uuid,
ipv4_network,
ipv6_network,
network_reservation_store,
)
.await?;
debug!("initializing zone reconciler");
let zone_reconciler = ZoneReconciler::new(
devices.clone(),
glt.clone(),
zlt.clone(),
zones.clone(),
events.clone(),
runtime_for_reconciler,
@ -120,6 +148,8 @@ impl Daemon {
kernel_path,
initrd_path,
addons_path,
network.clone(),
config.clone(),
)?;
let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
@ -127,19 +157,21 @@ impl Daemon {
// TODO: Create a way of abstracting early init tasks in kratad.
// TODO: Make initial power management policy configurable.
// FIXME: Power management hypercalls fail when running as an L1 hypervisor.
// let power = runtime.power_management_context().await?;
// power.set_smt_policy(true).await?;
// power
// .set_scheduler_policy("performance".to_string())
// .await?;
let power = runtime.power_management_context().await?;
power.set_smt_policy(true).await?;
power
.set_scheduler_policy("performance".to_string())
.await?;
info!("power management initialized");
info!("krata daemon initialized");
Ok(Self {
store,
_config: config,
glt,
zlt,
devices,
zones,
network,
events,
zone_reconciler_task,
zone_reconciler_notify,
@ -152,13 +184,15 @@ impl Daemon {
}
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
debug!("starting control service");
let control_service = DaemonControlService::new(
self.glt.clone(),
self.zlt.clone(),
self.devices.clone(),
self.events.clone(),
self.console.clone(),
self.idm.clone(),
self.zones.clone(),
self.network.clone(),
self.zone_reconciler_notify.clone(),
self.packer.clone(),
self.runtime.clone(),
@ -181,6 +215,8 @@ impl Daemon {
server = server.tls_config(tls_config)?;
}
server = server.http2_keepalive_interval(Some(Duration::from_secs(10)));
let server = server.add_service(ControlServiceServer::new(control_service));
info!("listening on address {}", addr);
match addr {

View File

@ -0,0 +1,204 @@
use advmac::MacAddr6;
use anyhow::{anyhow, Result};
use ipnetwork::{Ipv4Network, Ipv6Network};
use std::{
collections::HashMap,
net::{Ipv4Addr, Ipv6Addr},
sync::Arc,
};
use tokio::sync::RwLock;
use uuid::Uuid;
use crate::db::network::{NetworkReservation, NetworkReservationStore};
/// In-memory index of active reservations keyed by address, used to find
/// free IPv4/IPv6 addresses during allocation.
#[derive(Default, Clone)]
pub struct NetworkAssignmentState {
    pub ipv4: HashMap<Ipv4Addr, NetworkReservation>,
    pub ipv6: HashMap<Ipv6Addr, NetworkReservation>,
}
/// Allocates and tracks IPv4/IPv6 addresses and MACs for zones out of the
/// configured networks, persisting reservations through the store.
#[derive(Clone)]
pub struct NetworkAssignment {
    ipv4_network: Ipv4Network,
    ipv6_network: Ipv6Network,
    // Addresses of the gateway reservation (persisted under the nil UUID).
    gateway_ipv4: Ipv4Addr,
    gateway_ipv6: Ipv6Addr,
    gateway_mac: MacAddr6,
    store: NetworkReservationStore,
    state: Arc<RwLock<NetworkAssignmentState>>,
}
impl NetworkAssignment {
    /// Load (or bootstrap) the assignment state from the reservation store.
    ///
    /// Two well-known reservations are ensured on first run: the gateway
    /// reservation under the nil UUID, and the host's own reservation under
    /// `host_uuid`, which points at the gateway's addresses.
    pub async fn new(
        host_uuid: Uuid,
        ipv4_network: Ipv4Network,
        ipv6_network: Ipv6Network,
        store: NetworkReservationStore,
    ) -> Result<Self> {
        let mut state = NetworkAssignment::fetch_current_state(&store).await?;
        let gateway_reservation = if let Some(reservation) = store.read(Uuid::nil()).await? {
            reservation
        } else {
            // No gateway yet: allocate one with no upstream gateway of its
            // own (its gateway fields fall back to its own addresses).
            NetworkAssignment::allocate(
                &mut state,
                &store,
                Uuid::nil(),
                ipv4_network,
                ipv6_network,
                None,
                None,
                None,
            )
            .await?
        };
        if store.read(host_uuid).await?.is_none() {
            let _ = NetworkAssignment::allocate(
                &mut state,
                &store,
                host_uuid,
                ipv4_network,
                ipv6_network,
                Some(gateway_reservation.gateway_ipv4),
                Some(gateway_reservation.gateway_ipv6),
                Some(gateway_reservation.gateway_mac),
            )
            .await?;
        }
        let assignment = NetworkAssignment {
            ipv4_network,
            ipv6_network,
            gateway_ipv4: gateway_reservation.ipv4,
            gateway_ipv6: gateway_reservation.ipv6,
            gateway_mac: gateway_reservation.mac,
            store,
            state: Arc::new(RwLock::new(state)),
        };
        Ok(assignment)
    }

    /// Rebuild the in-memory address index from the persisted reservations.
    async fn fetch_current_state(
        store: &NetworkReservationStore,
    ) -> Result<NetworkAssignmentState> {
        let reservations = store.list().await?;
        let mut state = NetworkAssignmentState::default();
        for reservation in reservations.values() {
            state.ipv4.insert(reservation.ipv4, reservation.clone());
            state.ipv6.insert(reservation.ipv6, reservation.clone());
        }
        Ok(state)
    }

    /// Pick the first free IPv4/IPv6 pair in the given networks, mint a
    /// locally-administered unicast MAC, persist the reservation, and record
    /// it in `state`. Errors when either address space is exhausted.
    #[allow(clippy::too_many_arguments)]
    async fn allocate(
        state: &mut NetworkAssignmentState,
        store: &NetworkReservationStore,
        uuid: Uuid,
        ipv4_network: Ipv4Network,
        ipv6_network: Ipv6Network,
        gateway_ipv4: Option<Ipv4Addr>,
        gateway_ipv6: Option<Ipv6Addr>,
        gateway_mac: Option<MacAddr6>,
    ) -> Result<NetworkReservation> {
        let found_ipv4: Option<Ipv4Addr> = ipv4_network
            .iter()
            .filter(|ip| {
                ip.is_private() && !(ip.is_loopback() || ip.is_multicast() || ip.is_broadcast())
            })
            .filter(|ip| {
                let last = ip.octets()[3];
                // keep IPs ending in .1 to .249 — .250 and above can have
                // special meaning, and .0 is the network address
                (1..250).contains(&last)
            })
            .find(|ip| !state.ipv4.contains_key(ip));
        let found_ipv6: Option<Ipv6Addr> = ipv6_network
            .iter()
            .filter(|ip| !ip.is_loopback() && !ip.is_multicast())
            .filter(|ip| {
                // skip addresses whose final byte is zero
                let last = ip.octets()[15];
                last > 0
            })
            .find(|ip| !state.ipv6.contains_key(ip));
        let Some(ipv4) = found_ipv4 else {
            return Err(anyhow!(
                "unable to allocate ipv4 address, assigned network is exhausted"
            ));
        };
        let Some(ipv6) = found_ipv6 else {
            return Err(anyhow!(
                "unable to allocate ipv6 address, assigned network is exhausted"
            ));
        };
        // Random MAC, marked locally-administered and unicast.
        let mut mac = MacAddr6::random();
        mac.set_local(true);
        mac.set_multicast(false);
        let reservation = NetworkReservation {
            uuid: uuid.to_string(),
            ipv4,
            ipv6,
            mac,
            ipv4_prefix: ipv4_network.prefix(),
            ipv6_prefix: ipv6_network.prefix(),
            // A reservation with no explicit gateway (the gateway itself)
            // points at its own addresses.
            gateway_ipv4: gateway_ipv4.unwrap_or(ipv4),
            gateway_ipv6: gateway_ipv6.unwrap_or(ipv6),
            gateway_mac: gateway_mac.unwrap_or(mac),
        };
        state.ipv4.insert(ipv4, reservation.clone());
        state.ipv6.insert(ipv6, reservation.clone());
        store.update(uuid, reservation.clone()).await?;
        Ok(reservation)
    }

    /// Allocate and persist a new reservation for `uuid`, routed through the
    /// gateway reservation.
    pub async fn assign(&self, uuid: Uuid) -> Result<NetworkReservation> {
        let mut state = self.state.write().await;
        let reservation = NetworkAssignment::allocate(
            &mut state,
            &self.store,
            uuid,
            self.ipv4_network,
            self.ipv6_network,
            Some(self.gateway_ipv4),
            Some(self.gateway_ipv6),
            Some(self.gateway_mac),
        )
        .await?;
        Ok(reservation)
    }

    /// Release the reservation for `uuid` from the store and both indexes.
    pub async fn recall(&self, uuid: Uuid) -> Result<()> {
        let mut state = self.state.write().await;
        self.store.remove(uuid).await?;
        // Render the UUID once, instead of allocating a fresh String for
        // every entry examined by the retain closures.
        let uuid = uuid.to_string();
        state.ipv4.retain(|_, reservation| reservation.uuid != uuid);
        state.ipv6.retain(|_, reservation| reservation.uuid != uuid);
        Ok(())
    }

    /// Look up the persisted reservation for `uuid`, if any.
    pub async fn retrieve(&self, uuid: Uuid) -> Result<Option<NetworkReservation>> {
        self.store.read(uuid).await
    }

    /// Replace the in-memory index with a fresh view of the store.
    pub async fn reload(&self) -> Result<()> {
        let mut state = self.state.write().await;
        let intermediate = NetworkAssignment::fetch_current_state(&self.store).await?;
        *state = intermediate;
        Ok(())
    }

    /// Snapshot the current in-memory index.
    pub async fn read(&self) -> Result<NetworkAssignmentState> {
        Ok(self.state.read().await.clone())
    }

    /// List all persisted reservations keyed by zone UUID.
    pub async fn read_reservations(&self) -> Result<HashMap<Uuid, NetworkReservation>> {
        self.store.list().await
    }
}

View File

@ -0,0 +1 @@
pub mod assignment;

View File

@ -1,41 +1,41 @@
use anyhow::{anyhow, Result};
use futures::StreamExt;
use krata::launchcfg::LaunchPackedFormat;
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
use krata::v1::common::{ZoneOciImageSpec, ZoneResourceStatus};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy, ZoneLaunchNetwork};
use kratart::{launch::ZoneLaunchRequest, Runtime};
use log::info;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use anyhow::{anyhow, Result};
use futures::StreamExt;
use krata::launchcfg::LaunchPackedFormat;
use krata::v1::common::ZoneOciImageSpec;
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
use kratart::{launch::ZoneLaunchRequest, Runtime};
use log::info;
use crate::config::DaemonPciDeviceRdmReservePolicy;
use crate::config::{DaemonConfig, DaemonPciDeviceRdmReservePolicy};
use crate::devices::DaemonDeviceManager;
use crate::{
reconcile::zone::{zoneinfo_to_networkstate, ZoneReconcilerResult},
zlt::ZoneLookupTable,
};
use crate::network::assignment::NetworkAssignment;
use crate::reconcile::zone::network_reservation_to_network_status;
use crate::{reconcile::zone::ZoneReconcilerResult, zlt::ZoneLookupTable};
use krata::v1::common::zone_image_spec::Image;
use tokio::fs::{self, File};
use tokio::io::AsyncReadExt;
use tokio_tar::Archive;
use uuid::Uuid;
pub struct ZoneStarter<'a> {
pub struct ZoneCreator<'a> {
pub devices: &'a DaemonDeviceManager,
pub kernel_path: &'a Path,
pub initrd_path: &'a Path,
pub addons_path: &'a Path,
pub packer: &'a OciPackerService,
pub glt: &'a ZoneLookupTable,
pub network_assignment: &'a NetworkAssignment,
pub zlt: &'a ZoneLookupTable,
pub runtime: &'a Runtime,
pub config: &'a DaemonConfig,
}
impl ZoneStarter<'_> {
impl ZoneCreator<'_> {
pub async fn oci_spec_tar_read_file(
&self,
file: &Path,
@ -75,8 +75,8 @@ impl ZoneStarter<'_> {
))
}
pub async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
let Some(ref spec) = zone.spec else {
pub async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
let Some(ref mut spec) = zone.spec else {
return Err(anyhow!("zone spec not specified"));
};
@ -174,10 +174,29 @@ impl ZoneStarter<'_> {
}
}
let reservation = self.network_assignment.assign(uuid).await?;
let mut initial_resources = spec.initial_resources.unwrap_or_default();
if initial_resources.target_cpus < 1 {
initial_resources.target_cpus = 1;
}
if initial_resources.target_cpus > initial_resources.max_cpus {
initial_resources.max_cpus = initial_resources.target_cpus;
}
spec.initial_resources = Some(initial_resources);
let kernel_options = spec.kernel_options.clone().unwrap_or_default();
let info = self
.runtime
.launch(ZoneLaunchRequest {
format: LaunchPackedFormat::Squashfs,
format: match image.format {
OciPackedFormat::Squashfs => LaunchPackedFormat::Squashfs,
OciPackedFormat::Erofs => LaunchPackedFormat::Erofs,
_ => {
return Err(anyhow!(
"oci image is in an invalid format, which isn't compatible with launch"
));
}
},
uuid: Some(uuid),
name: if spec.name.is_empty() {
None
@ -187,8 +206,10 @@ impl ZoneStarter<'_> {
image,
kernel,
initrd,
vcpus: spec.vcpus,
mem: spec.mem,
target_cpus: initial_resources.target_cpus,
max_cpus: initial_resources.max_cpus,
max_memory: initial_resources.max_memory,
target_memory: initial_resources.target_memory,
pcis,
env: task
.environment
@ -196,18 +217,32 @@ impl ZoneStarter<'_> {
.map(|x| (x.key.clone(), x.value.clone()))
.collect::<HashMap<_, _>>(),
run: empty_vec_optional(task.command.clone()),
debug: false,
kernel_verbose: kernel_options.verbose,
kernel_cmdline_append: kernel_options.cmdline_append,
addons_image: Some(self.addons_path.to_path_buf()),
network: ZoneLaunchNetwork {
ipv4: reservation.ipv4.to_string(),
ipv4_prefix: reservation.ipv4_prefix,
ipv6: reservation.ipv6.to_string(),
ipv6_prefix: reservation.ipv6_prefix,
gateway_ipv4: reservation.gateway_ipv4.to_string(),
gateway_ipv6: reservation.gateway_ipv6.to_string(),
zone_mac: reservation.mac,
nameservers: self.config.network.nameservers.clone(),
},
})
.await?;
self.glt.associate(uuid, info.domid).await;
info!("started zone {}", uuid);
zone.state = Some(ZoneState {
status: ZoneStatus::Started.into(),
network: Some(zoneinfo_to_networkstate(&info)),
exit_info: None,
error_info: None,
host: self.glt.host_uuid().to_string(),
self.zlt.associate(uuid, info.domid).await;
info!("created zone {}", uuid);
zone.status = Some(ZoneStatus {
state: ZoneState::Created.into(),
network_status: Some(network_reservation_to_network_status(&reservation)),
exit_status: None,
error_status: None,
resource_status: Some(ZoneResourceStatus {
active_resources: Some(initial_resources),
}),
host: self.zlt.host_uuid().to_string(),
domid: info.domid,
});
success.store(true, Ordering::Release);

View File

@ -5,35 +5,36 @@ use std::{
time::Duration,
};
use self::create::ZoneCreator;
use crate::config::DaemonConfig;
use crate::db::network::NetworkReservation;
use crate::network::assignment::NetworkAssignment;
use crate::{
db::zone::ZoneStore,
devices::DaemonDeviceManager,
event::{DaemonEvent, DaemonEventContext},
zlt::ZoneLookupTable,
};
use anyhow::Result;
use krata::v1::{
common::{Zone, ZoneErrorInfo, ZoneExitInfo, ZoneNetworkState, ZoneState, ZoneStatus},
common::{Zone, ZoneErrorStatus, ZoneExitStatus, ZoneNetworkStatus, ZoneState, ZoneStatus},
control::ZoneChangedEvent,
};
use krataoci::packer::service::OciPackerService;
use kratart::{Runtime, ZoneInfo};
use kratart::Runtime;
use log::{error, info, trace, warn};
use tokio::{
select,
sync::{
mpsc::{channel, Receiver, Sender},
Mutex, RwLock,
RwLock,
},
task::JoinHandle,
time::sleep,
};
use uuid::Uuid;
use crate::{
db::ZoneStore,
devices::DaemonDeviceManager,
event::{DaemonEvent, DaemonEventContext},
zlt::ZoneLookupTable,
};
use self::start::ZoneStarter;
mod start;
mod create;
const PARALLEL_LIMIT: u32 = 5;
@ -44,16 +45,9 @@ enum ZoneReconcilerResult {
}
struct ZoneReconcilerEntry {
task: JoinHandle<()>,
sender: Sender<()>,
}
impl Drop for ZoneReconcilerEntry {
fn drop(&mut self) {
self.task.abort();
}
}
#[derive(Clone)]
pub struct ZoneReconciler {
devices: DaemonDeviceManager,
@ -65,9 +59,11 @@ pub struct ZoneReconciler {
kernel_path: PathBuf,
initrd_path: PathBuf,
addons_path: PathBuf,
tasks: Arc<Mutex<HashMap<Uuid, ZoneReconcilerEntry>>>,
tasks: Arc<RwLock<HashMap<Uuid, ZoneReconcilerEntry>>>,
zone_reconciler_notify: Sender<Uuid>,
zone_reconcile_lock: Arc<RwLock<()>>,
ip_assignment: NetworkAssignment,
config: Arc<DaemonConfig>,
}
impl ZoneReconciler {
@ -83,6 +79,8 @@ impl ZoneReconciler {
kernel_path: PathBuf,
initrd_path: PathBuf,
modules_path: PathBuf,
ip_assignment: NetworkAssignment,
config: Arc<DaemonConfig>,
) -> Result<Self> {
Ok(Self {
devices,
@ -94,9 +92,11 @@ impl ZoneReconciler {
kernel_path,
initrd_path,
addons_path: modules_path,
tasks: Arc::new(Mutex::new(HashMap::new())),
tasks: Arc::new(RwLock::new(HashMap::new())),
zone_reconciler_notify,
zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
ip_assignment,
config,
})
}
@ -118,7 +118,7 @@ impl ZoneReconciler {
error!("failed to start zone reconciler task {}: {}", uuid, error);
}
let map = self.tasks.lock().await;
let map = self.tasks.read().await;
if let Some(entry) = map.get(&uuid) {
if let Err(error) = entry.sender.send(()).await {
error!("failed to notify zone reconciler task {}: {}", uuid, error);
@ -132,7 +132,7 @@ impl ZoneReconciler {
error!("runtime reconciler failed: {}", error);
}
}
};
}
}
}))
}
@ -166,21 +166,21 @@ impl ZoneReconciler {
let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
match runtime_zone {
None => {
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
if state.status() == ZoneStatus::Started {
state.status = ZoneStatus::Starting.into();
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
if status.state() == ZoneState::Created {
status.state = ZoneState::Creating.into();
}
stored_zone.state = Some(state);
stored_zone.status = Some(status);
}
Some(runtime) => {
self.zlt.associate(uuid, runtime.domid).await;
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
if let Some(code) = runtime.state.exit_code {
state.status = ZoneStatus::Exited.into();
state.exit_info = Some(ZoneExitInfo { code });
status.state = ZoneState::Exited.into();
status.exit_status = Some(ZoneExitStatus { code });
} else {
state.status = ZoneStatus::Started.into();
status.state = ZoneState::Created.into();
}
for device in &stored_zone
@ -193,8 +193,11 @@ impl ZoneReconciler {
device_claims.insert(device.name.clone(), uuid);
}
state.network = Some(zoneinfo_to_networkstate(runtime));
stored_zone.state = Some(state);
if let Some(reservation) = self.ip_assignment.retrieve(uuid).await? {
status.network_status =
Some(network_reservation_to_network_status(&reservation));
}
stored_zone.status = Some(status);
}
}
@ -228,20 +231,20 @@ impl ZoneReconciler {
zone: Some(zone.clone()),
}))?;
let start_status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
let result = match start_status {
ZoneStatus::Starting => self.start(uuid, &mut zone).await,
ZoneStatus::Exited => self.exited(&mut zone).await,
ZoneStatus::Destroying => self.destroy(uuid, &mut zone).await,
let start_state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
let result = match start_state {
ZoneState::Creating => self.create(uuid, &mut zone).await,
ZoneState::Exited => self.exited(&mut zone).await,
ZoneState::Destroying => self.destroy(uuid, &mut zone).await,
_ => Ok(ZoneReconcilerResult::Unchanged),
};
let result = match result {
Ok(result) => result,
Err(error) => {
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
zone.state.as_mut().unwrap().status = ZoneStatus::Failed.into();
zone.state.as_mut().unwrap().error_info = Some(ZoneErrorInfo {
zone.status = Some(zone.status.as_mut().cloned().unwrap_or_default());
zone.status.as_mut().unwrap().state = ZoneState::Failed.into();
zone.status.as_mut().unwrap().error_status = Some(ZoneErrorStatus {
message: error.to_string(),
});
warn!("failed to start zone {}: {}", zone.id, error);
@ -251,8 +254,8 @@ impl ZoneReconciler {
info!("reconciled zone {}", uuid);
let status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
let destroyed = status == ZoneStatus::Destroyed;
let state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
let destroyed = state == ZoneState::Destroyed;
let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
@ -261,7 +264,7 @@ impl ZoneReconciler {
if destroyed {
self.zones.remove(uuid).await?;
let mut map = self.tasks.lock().await;
let mut map = self.tasks.write().await;
map.remove(&uuid);
} else {
self.zones.update(uuid, zone.clone()).await?;
@ -276,22 +279,24 @@ impl ZoneReconciler {
Ok(rerun)
}
async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
let starter = ZoneStarter {
async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
let starter = ZoneCreator {
devices: &self.devices,
kernel_path: &self.kernel_path,
initrd_path: &self.initrd_path,
addons_path: &self.addons_path,
packer: &self.packer,
glt: &self.zlt,
network_assignment: &self.ip_assignment,
zlt: &self.zlt,
runtime: &self.runtime,
config: &self.config,
};
starter.start(uuid, zone).await
starter.create(uuid, zone).await
}
async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
if let Some(ref mut state) = zone.state {
state.set_status(ZoneStatus::Destroying);
if let Some(ref mut status) = zone.status {
status.set_state(ZoneState::Destroying);
Ok(ZoneReconcilerResult::Changed { rerun: true })
} else {
Ok(ZoneReconcilerResult::Unchanged)
@ -303,18 +308,20 @@ impl ZoneReconciler {
trace!("failed to destroy runtime zone {}: {}", uuid, error);
}
let domid = zone.state.as_ref().map(|x| x.domid);
let domid = zone.status.as_ref().map(|x| x.domid);
if let Some(domid) = domid {
self.zlt.remove(uuid, domid).await;
}
info!("destroyed zone {}", uuid);
zone.state = Some(ZoneState {
status: ZoneStatus::Destroyed.into(),
network: None,
exit_info: None,
error_info: None,
self.ip_assignment.recall(uuid).await?;
zone.status = Some(ZoneStatus {
state: ZoneState::Destroyed.into(),
network_status: None,
exit_status: None,
error_status: None,
resource_status: None,
host: self.zlt.host_uuid().to_string(),
domid: domid.unwrap_or(u32::MAX),
});
@ -323,7 +330,7 @@ impl ZoneReconciler {
}
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
let mut map = self.tasks.lock().await;
let mut map = self.tasks.write().await;
match map.entry(uuid) {
Entry::Occupied(_) => {}
Entry::Vacant(entry) => {
@ -336,7 +343,7 @@ impl ZoneReconciler {
async fn launch_task(&self, uuid: Uuid) -> Result<ZoneReconcilerEntry> {
let this = self.clone();
let (sender, mut receiver) = channel(10);
let task = tokio::task::spawn(async move {
tokio::task::spawn(async move {
'notify_loop: loop {
if receiver.recv().await.is_none() {
break 'notify_loop;
@ -358,17 +365,17 @@ impl ZoneReconciler {
}
}
});
Ok(ZoneReconcilerEntry { task, sender })
Ok(ZoneReconcilerEntry { sender })
}
}
pub fn zoneinfo_to_networkstate(info: &ZoneInfo) -> ZoneNetworkState {
ZoneNetworkState {
zone_ipv4: info.zone_ipv4.map(|x| x.to_string()).unwrap_or_default(),
zone_ipv6: info.zone_ipv6.map(|x| x.to_string()).unwrap_or_default(),
zone_mac: info.zone_mac.as_ref().cloned().unwrap_or_default(),
gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),
pub fn network_reservation_to_network_status(ip: &NetworkReservation) -> ZoneNetworkStatus {
ZoneNetworkStatus {
zone_ipv4: format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
zone_ipv6: format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
zone_mac: ip.mac.to_string().to_lowercase().replace('-', ":"),
gateway_ipv4: format!("{}/{}", ip.gateway_ipv4, ip.ipv4_prefix),
gateway_ipv6: format!("{}/{}", ip.gateway_ipv6, ip.ipv6_prefix),
gateway_mac: ip.gateway_mac.to_string().to_lowercase().replace('-', ":"),
}
}

View File

@ -45,16 +45,25 @@ message ExecStreamRequestStart {
repeated ExecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
bool tty = 4;
ExecStreamRequestTerminalSize terminal_size = 5;
}
message ExecStreamRequestStdin {
bytes data = 1;
bool closed = 2;
}
message ExecStreamRequestTerminalSize {
uint32 rows = 1;
uint32 columns = 2;
}
message ExecStreamRequestUpdate {
oneof update {
ExecStreamRequestStart start = 1;
ExecStreamRequestStdin stdin = 2;
ExecStreamRequestTerminalSize terminal_resize = 3;
}
}

View File

@ -11,7 +11,7 @@ import "google/protobuf/struct.proto";
message Zone {
string id = 1;
ZoneSpec spec = 2;
ZoneState state = 3;
ZoneStatus status = 3;
}
message ZoneSpec {
@ -21,11 +21,18 @@ message ZoneSpec {
ZoneImageSpec kernel = 3;
// If not specified, defaults to the daemon default initrd.
ZoneImageSpec initrd = 4;
uint32 vcpus = 5;
uint64 mem = 6;
ZoneTaskSpec task = 7;
repeated ZoneSpecAnnotation annotations = 8;
repeated ZoneSpecDevice devices = 9;
ZoneResourceSpec initial_resources = 5;
ZoneTaskSpec task = 6;
repeated ZoneSpecAnnotation annotations = 7;
repeated ZoneSpecDevice devices = 8;
ZoneKernelOptionsSpec kernel_options = 9;
}
message ZoneResourceSpec {
uint64 max_memory = 1;
uint64 target_memory = 2;
uint32 max_cpus = 3;
uint32 target_cpus = 4;
}
message ZoneImageSpec {
@ -34,6 +41,11 @@ message ZoneImageSpec {
}
}
message ZoneKernelOptionsSpec {
bool verbose = 1;
string cmdline_append = 2;
}
enum OciImageFormat {
OCI_IMAGE_FORMAT_UNKNOWN = 0;
OCI_IMAGE_FORMAT_SQUASHFS = 1;
@ -51,6 +63,7 @@ message ZoneTaskSpec {
repeated ZoneTaskSpecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
bool tty = 4;
}
message ZoneTaskSpecEnvVar {
@ -67,26 +80,27 @@ message ZoneSpecDevice {
string name = 1;
}
message ZoneState {
ZoneStatus status = 1;
ZoneNetworkState network = 2;
ZoneExitInfo exit_info = 3;
ZoneErrorInfo error_info = 4;
message ZoneStatus {
ZoneState state = 1;
ZoneNetworkStatus network_status = 2;
ZoneExitStatus exit_status = 3;
ZoneErrorStatus error_status = 4;
string host = 5;
uint32 domid = 6;
ZoneResourceStatus resource_status = 7;
}
enum ZoneStatus {
ZONE_STATUS_UNKNOWN = 0;
ZONE_STATUS_STARTING = 1;
ZONE_STATUS_STARTED = 2;
ZONE_STATUS_EXITED = 3;
ZONE_STATUS_DESTROYING = 4;
ZONE_STATUS_DESTROYED = 5;
ZONE_STATUS_FAILED = 6;
enum ZoneState {
ZONE_STATE_UNKNOWN = 0;
ZONE_STATE_CREATING = 1;
ZONE_STATE_CREATED = 2;
ZONE_STATE_EXITED = 3;
ZONE_STATE_DESTROYING = 4;
ZONE_STATE_DESTROYED = 5;
ZONE_STATE_FAILED = 6;
}
message ZoneNetworkState {
message ZoneNetworkStatus {
string zone_ipv4 = 1;
string zone_ipv6 = 2;
string zone_mac = 3;
@ -95,14 +109,18 @@ message ZoneNetworkState {
string gateway_mac = 6;
}
message ZoneExitInfo {
message ZoneExitStatus {
int32 code = 1;
}
message ZoneErrorInfo {
message ZoneErrorStatus {
string message = 1;
}
message ZoneResourceStatus {
ZoneResourceSpec active_resources = 1;
}
message ZoneMetricNode {
string name = 1;
google.protobuf.Value value = 2;
@ -116,3 +134,18 @@ enum ZoneMetricFormat {
ZONE_METRIC_FORMAT_INTEGER = 2;
ZONE_METRIC_FORMAT_DURATION_SECONDS = 3;
}
message TerminalSize {
uint32 rows = 1;
uint32 columns = 2;
}
message NetworkReservation {
string uuid = 1;
string ipv4 = 2;
string ipv6 = 3;
string mac = 4;
string gateway_ipv4 = 5;
string gateway_ipv6 = 6;
string gateway_mac = 7;
}

View File

@ -10,34 +10,46 @@ import "krata/idm/transport.proto";
import "krata/v1/common.proto";
service ControlService {
rpc IdentifyHost(IdentifyHostRequest) returns (IdentifyHostReply);
rpc GetHostStatus(GetHostStatusRequest) returns (GetHostStatusReply);
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
rpc GetHostCpuTopology(GetHostCpuTopologyRequest) returns (GetHostCpuTopologyReply);
rpc SetHostPowerManagementPolicy(SetHostPowerManagementPolicyRequest) returns (SetHostPowerManagementPolicyReply);
rpc CreateZone(CreateZoneRequest) returns (CreateZoneReply);
rpc DestroyZone(DestroyZoneRequest) returns (DestroyZoneReply);
rpc ResolveZone(ResolveZoneRequest) returns (ResolveZoneReply);
rpc ListZones(ListZonesRequest) returns (ListZonesReply);
rpc ListDevices(ListDevicesRequest) returns (ListDevicesReply);
rpc ExecZone(stream ExecZoneRequest) returns (stream ExecZoneReply);
rpc AttachZoneConsole(stream ZoneConsoleRequest) returns (stream ZoneConsoleReply);
rpc ReadZoneMetrics(ReadZoneMetricsRequest) returns (ReadZoneMetricsReply);
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
rpc ListNetworkReservations(ListNetworkReservationsRequest) returns (ListNetworkReservationsReply);
rpc PullImage(PullImageRequest) returns (stream PullImageReply);
rpc GetHostCpuTopology(HostCpuTopologyRequest) returns (HostCpuTopologyReply);
rpc SetHostPowerManagementPolicy(HostPowerManagementPolicy) returns (HostPowerManagementPolicy);
rpc CreateZone(CreateZoneRequest) returns (CreateZoneReply);
rpc DestroyZone(DestroyZoneRequest) returns (DestroyZoneReply);
rpc ResolveZoneId(ResolveZoneIdRequest) returns (ResolveZoneIdReply);
rpc GetZone(GetZoneRequest) returns (GetZoneReply);
rpc UpdateZoneResources(UpdateZoneResourcesRequest) returns (UpdateZoneResourcesReply);
rpc ListZones(ListZonesRequest) returns (ListZonesReply);
rpc AttachZoneConsole(stream ZoneConsoleRequest) returns (stream ZoneConsoleReply);
rpc ExecInsideZone(stream ExecInsideZoneRequest) returns (stream ExecInsideZoneReply);
rpc ReadZoneMetrics(ReadZoneMetricsRequest) returns (ReadZoneMetricsReply);
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
rpc ReadHypervisorConsole(ReadHypervisorConsoleRequest) returns (ReadHypervisorConsoleReply);
}
message IdentifyHostRequest {}
message GetHostStatusRequest {}
message IdentifyHostReply {
message GetHostStatusReply {
string host_uuid = 1;
uint32 host_domid = 2;
string krata_version = 3;
string host_ipv4 = 4;
string host_ipv6 = 5;
string host_mac = 6;
}
message CreateZoneRequest {
@ -45,36 +57,46 @@ message CreateZoneRequest {
}
message CreateZoneReply {
string Zone_id = 1;
string zone_id = 1;
}
message DestroyZoneRequest {
string Zone_id = 1;
string zone_id = 1;
}
message DestroyZoneReply {}
message ResolveZoneRequest {
message ResolveZoneIdRequest {
string name = 1;
}
message ResolveZoneReply {
krata.v1.common.Zone Zone = 1;
message ResolveZoneIdReply {
string zone_id = 1;
}
message GetZoneRequest {
string zone_id = 1;
}
message GetZoneReply {
krata.v1.common.Zone zone = 1;
}
message ListZonesRequest {}
message ListZonesReply {
repeated krata.v1.common.Zone Zones = 1;
repeated krata.v1.common.Zone zones = 1;
}
message ExecZoneRequest {
string Zone_id = 1;
message ExecInsideZoneRequest {
string zone_id = 1;
krata.v1.common.ZoneTaskSpec task = 2;
bytes data = 3;
bytes stdin = 3;
bool stdin_closed = 4;
krata.v1.common.TerminalSize terminal_size = 5;
}
message ExecZoneReply {
message ExecInsideZoneReply {
bool exited = 1;
string error = 2;
int32 exit_code = 3;
@ -83,8 +105,9 @@ message ExecZoneReply {
}
message ZoneConsoleRequest {
string Zone_id = 1;
string zone_id = 1;
bytes data = 2;
bool replay_history = 3;
}
message ZoneConsoleReply {
@ -95,16 +118,16 @@ message WatchEventsRequest {}
message WatchEventsReply {
oneof event {
ZoneChangedEvent Zone_changed = 1;
ZoneChangedEvent zone_changed = 1;
}
}
message ZoneChangedEvent {
krata.v1.common.Zone Zone = 1;
krata.v1.common.Zone zone = 1;
}
message ReadZoneMetricsRequest {
string Zone_id = 1;
string zone_id = 1;
}
message ReadZoneMetricsReply {
@ -219,15 +242,34 @@ message HostCpuTopologyInfo {
HostCpuTopologyClass class = 5;
}
message HostCpuTopologyRequest {}
message GetHostCpuTopologyRequest {}
message HostCpuTopologyReply {
message GetHostCpuTopologyReply {
repeated HostCpuTopologyInfo cpus = 1;
}
message HostPowerManagementPolicyRequest {}
message HostPowerManagementPolicy {
message SetHostPowerManagementPolicyRequest {
string scheduler = 1;
bool smt_awareness = 2;
}
message SetHostPowerManagementPolicyReply {}
message UpdateZoneResourcesRequest {
string zone_id = 1;
krata.v1.common.ZoneResourceSpec resources = 2;
}
message UpdateZoneResourcesReply {}
message ReadHypervisorConsoleRequest {}
message ReadHypervisorConsoleReply {
string data = 1;
}
message ListNetworkReservationsRequest {}
message ListNetworkReservationsReply {
repeated krata.v1.common.NetworkReservation reservations = 1;
}

View File

@ -9,13 +9,13 @@ use std::{
};
use anyhow::{anyhow, Result};
use bytes::{BufMut, BytesMut};
use bytes::{Buf, BufMut, BytesMut};
use log::{debug, error};
use nix::sys::termios::{cfmakeraw, tcgetattr, tcsetattr, SetArg};
use prost::Message;
use tokio::{
fs::File,
io::{unix::AsyncFd, AsyncReadExt, AsyncWriteExt},
io::{AsyncReadExt, AsyncWriteExt},
select,
sync::{
broadcast,
@ -34,7 +34,7 @@ use super::{
type OneshotRequestMap<R> = Arc<Mutex<HashMap<u64, oneshot::Sender<<R as IdmRequest>::Response>>>>;
type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, mpsc::Sender<R>>>>;
type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, Sender<R>>>>;
pub type IdmInternalClient = IdmClient<internal::Request, internal::Event>;
const IDM_PACKET_QUEUE_LEN: usize = 100;
@ -43,12 +43,13 @@ const IDM_PACKET_MAX_SIZE: usize = 20 * 1024 * 1024;
#[async_trait::async_trait]
pub trait IdmBackend: Send {
async fn recv(&mut self) -> Result<IdmTransportPacket>;
async fn recv(&mut self) -> Result<Vec<IdmTransportPacket>>;
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()>;
}
pub struct IdmFileBackend {
read_fd: Arc<Mutex<AsyncFd<File>>>,
read: Arc<Mutex<File>>,
read_buffer: BytesMut,
write: Arc<Mutex<File>>,
}
@ -57,7 +58,8 @@ impl IdmFileBackend {
IdmFileBackend::set_raw_port(&read_file)?;
IdmFileBackend::set_raw_port(&write_file)?;
Ok(IdmFileBackend {
read_fd: Arc::new(Mutex::new(AsyncFd::new(read_file)?)),
read: Arc::new(Mutex::new(read_file)),
read_buffer: BytesMut::new(),
write: Arc::new(Mutex::new(write_file)),
})
}
@ -72,26 +74,58 @@ impl IdmFileBackend {
#[async_trait::async_trait]
impl IdmBackend for IdmFileBackend {
async fn recv(&mut self) -> Result<IdmTransportPacket> {
let mut fd = self.read_fd.lock().await;
let mut guard = fd.readable_mut().await?;
let b1 = guard.get_inner_mut().read_u8().await?;
if b1 != 0xff {
return Ok(IdmTransportPacket::default());
async fn recv(&mut self) -> Result<Vec<IdmTransportPacket>> {
let mut data = vec![0; 8192];
let mut first = true;
'read_more: loop {
let mut packets = Vec::new();
if !first {
if !packets.is_empty() {
return Ok(packets);
}
let b2 = guard.get_inner_mut().read_u8().await?;
if b2 != 0xff {
return Ok(IdmTransportPacket::default());
let size = self.read.lock().await.read(&mut data).await?;
self.read_buffer.extend_from_slice(&data[0..size]);
}
let size = guard.get_inner_mut().read_u32_le().await?;
if size == 0 {
return Ok(IdmTransportPacket::default());
first = false;
loop {
if self.read_buffer.len() < 6 {
continue 'read_more;
}
let mut buffer = vec![0u8; size as usize];
guard.get_inner_mut().read_exact(&mut buffer).await?;
match IdmTransportPacket::decode(buffer.as_slice()) {
Ok(packet) => Ok(packet),
Err(error) => Err(anyhow!("received invalid idm packet: {}", error)),
let b1 = self.read_buffer[0];
let b2 = self.read_buffer[1];
if b1 != 0xff || b2 != 0xff {
self.read_buffer.clear();
continue 'read_more;
}
let size = (self.read_buffer[2] as u32
| (self.read_buffer[3] as u32) << 8
| (self.read_buffer[4] as u32) << 16
| (self.read_buffer[5] as u32) << 24) as usize;
let needed = size + 6;
if self.read_buffer.len() < needed {
continue 'read_more;
}
let mut packet = self.read_buffer.split_to(needed);
packet.advance(6);
match IdmTransportPacket::decode(packet) {
Ok(packet) => {
packets.push(packet);
}
Err(error) => {
return Err(anyhow!("received invalid idm packet: {}", error));
}
}
if self.read_buffer.is_empty() {
break;
}
}
return Ok(packets);
}
}
@ -403,7 +437,8 @@ impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
loop {
select! {
x = backend.recv() => match x {
Ok(packet) => {
Ok(packets) => {
for packet in packets {
if packet.channel != channel {
continue;
}
@ -460,6 +495,7 @@ impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
IdmTransportPacketForm::StreamRequestClosed => {
let mut update_streams = request_update_streams.lock().await;
update_streams.remove(&packet.id);
println!("stream request closed: {}", packet.id);
}
IdmTransportPacketForm::StreamResponseUpdate => {
@ -478,6 +514,7 @@ impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
_ => {},
}
}
},
Err(error) => {

View File

@ -16,7 +16,7 @@ clap = { workspace = true }
env_logger = { workspace = true }
etherparse = { workspace = true }
futures = { workspace = true }
krata = { path = "../krata", version = "^0.0.14" }
krata = { path = "../krata", version = "^0.0.20" }
krata-advmac = { workspace = true }
libc = { workspace = true }
log = { workspace = true }

View File

@ -76,44 +76,44 @@ impl AutoNetworkWatcher {
let mut networks: Vec<NetworkMetadata> = Vec::new();
for (uuid, zone) in &all_zones {
let Some(ref state) = zone.state else {
let Some(ref status) = zone.status else {
continue;
};
if state.domid == u32::MAX {
if status.domid == u32::MAX {
continue;
}
let Some(ref network) = state.network else {
let Some(ref network_status) = status.network_status else {
continue;
};
let Ok(zone_ipv4_cidr) = Ipv4Cidr::from_str(&network.zone_ipv4) else {
let Ok(zone_ipv4_cidr) = Ipv4Cidr::from_str(&network_status.zone_ipv4) else {
continue;
};
let Ok(zone_ipv6_cidr) = Ipv6Cidr::from_str(&network.zone_ipv6) else {
let Ok(zone_ipv6_cidr) = Ipv6Cidr::from_str(&network_status.zone_ipv6) else {
continue;
};
let Ok(zone_mac) = EthernetAddress::from_str(&network.zone_mac) else {
let Ok(zone_mac) = EthernetAddress::from_str(&network_status.zone_mac) else {
continue;
};
let Ok(gateway_ipv4_cidr) = Ipv4Cidr::from_str(&network.gateway_ipv4) else {
let Ok(gateway_ipv4_cidr) = Ipv4Cidr::from_str(&network_status.gateway_ipv4) else {
continue;
};
let Ok(gateway_ipv6_cidr) = Ipv6Cidr::from_str(&network.gateway_ipv6) else {
let Ok(gateway_ipv6_cidr) = Ipv6Cidr::from_str(&network_status.gateway_ipv6) else {
continue;
};
let Ok(gateway_mac) = EthernetAddress::from_str(&network.gateway_mac) else {
let Ok(gateway_mac) = EthernetAddress::from_str(&network_status.gateway_mac) else {
continue;
};
networks.push(NetworkMetadata {
domid: state.domid,
domid: status.domid,
uuid: *uuid,
zone: NetworkSide {
ipv4: zone_ipv4_cidr,
@ -187,7 +187,7 @@ impl AutoNetworkWatcher {
_ = sleep(Duration::from_secs(10)) => {
break;
}
};
}
}
Ok(())
}

View File

@ -127,7 +127,8 @@ impl NetworkBackend {
let (tx_sender, tx_receiver) = channel::<BytesMut>(TX_CHANNEL_BUFFER_LEN);
let mut udev = ChannelDevice::new(mtu, Medium::Ethernet, tx_sender.clone());
let mac = self.metadata.gateway.mac;
let nat = Nat::new(mtu, proxy, mac, addresses.clone(), tx_sender.clone())?;
let local_cidrs = addresses.clone();
let nat = Nat::new(mtu, proxy, mac, local_cidrs, tx_sender.clone())?;
let hardware_addr = HardwareAddress::Ethernet(mac);
let config = Config::new(hardware_addr);
let mut iface = Interface::new(config, &mut udev, Instant::now());

View File

@ -1,21 +1,15 @@
use std::{
io::ErrorKind,
net::{IpAddr, Ipv4Addr},
};
use std::{io::ErrorKind, net::IpAddr};
use advmac::MacAddr6;
use anyhow::{anyhow, Result};
use bytes::BytesMut;
use futures::TryStreamExt;
use log::error;
use smoltcp::wire::EthernetAddress;
use smoltcp::wire::{EthernetAddress, Ipv4Cidr, Ipv6Cidr};
use tokio::{select, task::JoinHandle};
use tokio_tun::Tun;
use crate::vbridge::{BridgeJoinHandle, VirtualBridge};
const HOST_IPV4_ADDR: Ipv4Addr = Ipv4Addr::new(10, 75, 0, 1);
#[derive(Debug)]
enum HostBridgeProcessSelect {
Send(Option<BytesMut>),
@ -27,7 +21,14 @@ pub struct HostBridge {
}
impl HostBridge {
pub async fn new(mtu: usize, interface: String, bridge: &VirtualBridge) -> Result<HostBridge> {
pub async fn new(
mtu: usize,
interface: String,
bridge: &VirtualBridge,
ipv4: Ipv4Cidr,
ipv6: Ipv6Cidr,
mac: EthernetAddress,
) -> Result<HostBridge> {
let tun = Tun::builder()
.name(&interface)
.tap(true)
@ -38,10 +39,6 @@ impl HostBridge {
let (connection, handle, _) = rtnetlink::new_connection()?;
tokio::spawn(connection);
let mut mac = MacAddr6::random();
mac.set_local(true);
mac.set_multicast(false);
let mut links = handle.link().get().match_name(interface.clone()).execute();
let link = links.try_next().await?;
if link.is_none() {
@ -54,25 +51,32 @@ impl HostBridge {
handle
.address()
.add(link.header.index, IpAddr::V4(HOST_IPV4_ADDR), 16)
.add(
link.header.index,
IpAddr::V4(ipv4.address().into()),
ipv4.prefix_len(),
)
.execute()
.await?;
handle
.address()
.add(link.header.index, IpAddr::V6(mac.to_link_local_ipv6()), 10)
.add(
link.header.index,
IpAddr::V6(ipv6.address().into()),
ipv6.prefix_len(),
)
.execute()
.await?;
handle
.link()
.set(link.header.index)
.address(mac.to_array().to_vec())
.address(mac.0.to_vec())
.up()
.execute()
.await?;
let mac = EthernetAddress(mac.to_array());
let bridge_handle = bridge.join(mac).await?;
let task = tokio::task::spawn(async move {

View File

@ -1,17 +1,21 @@
use std::{collections::HashMap, time::Duration};
use std::{collections::HashMap, str::FromStr, time::Duration};
use anyhow::Result;
use anyhow::{anyhow, Result};
use autonet::{AutoNetworkChangeset, AutoNetworkWatcher, NetworkMetadata};
use futures::{future::join_all, TryFutureExt};
use hbridge::HostBridge;
use krata::{
client::ControlClientProvider,
dial::ControlDialAddress,
v1::{common::Zone, control::control_service_client::ControlServiceClient},
v1::{
common::Zone,
control::{control_service_client::ControlServiceClient, GetHostStatusRequest},
},
};
use log::warn;
use smoltcp::wire::{EthernetAddress, Ipv4Cidr, Ipv6Cidr};
use tokio::{task::JoinHandle, time::sleep};
use tonic::transport::Channel;
use tonic::{transport::Channel, Request};
use uuid::Uuid;
use vbridge::VirtualBridge;
@ -41,10 +45,27 @@ pub struct NetworkService {
impl NetworkService {
pub async fn new(control_address: ControlDialAddress) -> Result<NetworkService> {
let control = ControlClientProvider::dial(control_address).await?;
let mut control = ControlClientProvider::dial(control_address).await?;
let host_status = control
.get_host_status(Request::new(GetHostStatusRequest {}))
.await?
.into_inner();
let host_ipv4 = Ipv4Cidr::from_str(&host_status.host_ipv4)
.map_err(|_| anyhow!("failed to parse host ipv4 cidr"))?;
let host_ipv6 = Ipv6Cidr::from_str(&host_status.host_ipv6)
.map_err(|_| anyhow!("failed to parse host ipv6 cidr"))?;
let host_mac = EthernetAddress::from_str(&host_status.host_mac)
.map_err(|_| anyhow!("failed to parse host mac address"))?;
let bridge = VirtualBridge::new()?;
let hbridge =
HostBridge::new(HOST_BRIDGE_MTU + EXTRA_MTU, "krata0".to_string(), &bridge).await?;
let hbridge = HostBridge::new(
HOST_BRIDGE_MTU + EXTRA_MTU,
"krata0".to_string(),
&bridge,
host_ipv4,
host_ipv6,
host_mac,
)
.await?;
Ok(NetworkService {
control,
zones: HashMap::new(),

View File

@ -25,7 +25,7 @@ async fn main() -> Result<()> {
let (context, mut receiver) = OciProgressContext::create();
tokio::task::spawn(async move {
loop {
if (receiver.changed().await).is_err() {
if receiver.changed().await.is_err() {
break;
}
let progress = receiver.borrow_and_update();

View File

@ -97,13 +97,13 @@ impl OciPackerBackend for OciPackerMkSquashfs {
status = &mut wait => {
break status;
}
};
}
} else {
select! {
status = &mut wait => {
break status;
}
};
}
}
};
if let Some(writer) = writer {
@ -172,13 +172,13 @@ impl OciPackerBackend for OciPackerMkfsErofs {
status = &mut wait => {
break status;
}
};
}
} else {
select! {
status = &mut wait => {
break status;
}
};
}
}
};
if let Some(writer) = writer {

View File

@ -228,7 +228,7 @@ impl OciBoundProgress {
context.update(&progress);
let mut receiver = self.context.subscribe();
tokio::task::spawn(async move {
while (receiver.changed().await).is_ok() {
while receiver.changed().await.is_ok() {
context
.sender
.send_replace(receiver.borrow_and_update().clone());

View File

@ -12,20 +12,20 @@ resolver = "2"
anyhow = { workspace = true }
backhand = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.14" }
krata = { path = "../krata", version = "^0.0.20" }
krata-advmac = { workspace = true }
krata-oci = { path = "../oci", version = "^0.0.14" }
krata-oci = { path = "../oci", version = "^0.0.20" }
log = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }
krata-loopdev = { path = "../loopdev", version = "^0.0.14" }
krata-xencall = { path = "../xen/xencall", version = "^0.0.14" }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.14" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.14" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.14" }
krata-xenplatform = { path = "../xen/xenplatform", version = "^0.0.14" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.14" }
krata-loopdev = { path = "../loopdev", version = "^0.0.20" }
krata-xencall = { path = "../xen/xencall", version = "^0.0.20" }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.20" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.20" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.20" }
krata-xenplatform = { path = "../xen/xenplatform", version = "^0.0.20" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.20" }
walkdir = { workspace = true }
indexmap = { workspace = true }

View File

@ -8,14 +8,11 @@ use anyhow::{anyhow, Result};
use log::{debug, error};
use tokio::{
select,
sync::{
broadcast,
mpsc::{channel, Receiver, Sender},
},
sync::mpsc::{channel, Receiver, Sender},
task::JoinHandle,
time::sleep,
};
use xenevtchn::EventChannel;
use xenevtchn::EventChannelService;
use xengnt::{sys::GrantRef, GrantTab, MappedMemory};
use xenstore::{XsdClient, XsdInterface};
@ -43,7 +40,7 @@ pub struct ChannelService {
typ: String,
use_reserved_ref: Option<u64>,
backends: HashMap<u32, ChannelBackend>,
evtchn: EventChannel,
evtchn: EventChannelService,
store: XsdClient,
gnttab: GrantTab,
input_receiver: Receiver<(u32, Vec<u8>)>,
@ -62,14 +59,22 @@ impl ChannelService {
)> {
let (input_sender, input_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
let (output_sender, output_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
debug!("opening xenevtchn");
let evtchn = EventChannelService::open().await?;
debug!("opening xenstore");
let store = XsdClient::open().await?;
debug!("opening xengnt");
let gnttab = GrantTab::open()?;
Ok((
ChannelService {
typ,
use_reserved_ref,
backends: HashMap::new(),
evtchn: EventChannel::open().await?,
store: XsdClient::open().await?,
gnttab: GrantTab::open()?,
evtchn,
store,
gnttab,
input_sender: input_sender.clone(),
input_receiver,
output_sender,
@ -226,7 +231,7 @@ impl ChannelBackend {
domid: u32,
id: u32,
store: XsdClient,
evtchn: EventChannel,
evtchn: EventChannelService,
gnttab: GrantTab,
output_sender: Sender<(u32, Option<Vec<u8>>)>,
use_reserved_ref: Option<u64>,
@ -265,7 +270,7 @@ pub struct KrataChannelBackendProcessor {
id: u32,
domid: u32,
store: XsdClient,
evtchn: EventChannel,
evtchn: EventChannelService,
gnttab: GrantTab,
}
@ -484,29 +489,22 @@ impl KrataChannelBackendProcessor {
},
x = channel.receiver.recv() => match x {
Ok(_) => {
Some(_) => {
unsafe {
let buffer = self.read_output_buffer(channel.local_port, &memory).await?;
if !buffer.is_empty() {
sender.send((self.domid, Some(buffer))).await?;
}
};
channel.unmask_sender.send(channel.local_port).await?;
channel.unmask().await?;
},
Err(error) => {
match error {
broadcast::error::RecvError::Closed => {
None => {
break;
},
error => {
return Err(anyhow!("failed to receive event notification: {}", error));
}
}
}
}
};
}
Ok(())
}

View File

@ -7,7 +7,7 @@ use std::{
use anyhow::{anyhow, Result};
use ipnetwork::{Ipv4Network, Ipv6Network};
use log::error;
use log::{debug, error};
use tokio::sync::RwLock;
use uuid::Uuid;
use xenstore::{XsdClient, XsdInterface};
@ -72,7 +72,9 @@ impl IpVendor {
ipv4_network: Ipv4Network,
ipv6_network: Ipv6Network,
) -> Result<Self> {
debug!("fetching state from xenstore");
let mut state = IpVendor::fetch_stored_state(&store).await?;
debug!("allocating IP set");
let (gateway_ipv4, gateway_ipv6) =
IpVendor::allocate_ipset(&mut state, host_uuid, ipv4_network, ipv6_network)?;
let vend = IpVendor {
@ -84,11 +86,14 @@ impl IpVendor {
gateway_ipv6,
state: Arc::new(RwLock::new(state)),
};
debug!("IP vendor initialized!");
Ok(vend)
}
async fn fetch_stored_state(store: &XsdClient) -> Result<IpVendorState> {
debug!("initializing default IP vendor state");
let mut state = IpVendorState::default();
debug!("iterating over xen domains");
for domid_candidate in store.list("/local/domain").await? {
let dom_path = format!("/local/domain/{}", domid_candidate);
let Some(uuid) = store
@ -119,6 +124,7 @@ impl IpVendor {
}
}
}
debug!("IP state hydrated");
Ok(state)
}

View File

@ -1,19 +1,21 @@
use std::collections::HashMap;
use std::fs;
use std::net::IpAddr;
use std::path::PathBuf;
use std::sync::Arc;
use advmac::MacAddr6;
use anyhow::{anyhow, Result};
use ipnetwork::IpNetwork;
use tokio::sync::Semaphore;
use uuid::Uuid;
use krata::launchcfg::{
LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
LaunchPackedFormat, LaunchRoot,
};
use krataoci::packer::OciPackedImage;
use tokio::sync::Semaphore;
use uuid::Uuid;
pub use xenclient::{
pci::PciBdf, DomainPciDevice as PciDevice, DomainPciRdmReservePolicy as PciRdmReservePolicy,
};
use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
use xenplatform::domain::BaseDomainConfig;
@ -22,24 +24,35 @@ use crate::RuntimeContext;
use super::{ZoneInfo, ZoneState};
pub use xenclient::{
pci::PciBdf, DomainPciDevice as PciDevice, DomainPciRdmReservePolicy as PciRdmReservePolicy,
};
pub struct ZoneLaunchRequest {
pub format: LaunchPackedFormat,
pub kernel: Vec<u8>,
pub initrd: Vec<u8>,
pub uuid: Option<Uuid>,
pub name: Option<String>,
pub vcpus: u32,
pub mem: u64,
pub target_cpus: u32,
pub max_cpus: u32,
pub target_memory: u64,
pub max_memory: u64,
pub env: HashMap<String, String>,
pub run: Option<Vec<String>>,
pub pcis: Vec<PciDevice>,
pub debug: bool,
pub kernel_verbose: bool,
pub kernel_cmdline_append: String,
pub image: OciPackedImage,
pub addons_image: Option<PathBuf>,
pub network: ZoneLaunchNetwork,
}
pub struct ZoneLaunchNetwork {
pub ipv4: String,
pub ipv4_prefix: u8,
pub ipv6: String,
pub ipv6_prefix: u8,
pub gateway_ipv4: String,
pub gateway_ipv6: String,
pub zone_mac: MacAddr6,
pub nameservers: Vec<String>,
}
pub struct ZoneLauncher {
@ -58,15 +71,7 @@ impl ZoneLauncher {
) -> Result<ZoneInfo> {
let uuid = request.uuid.unwrap_or_else(Uuid::new_v4);
let xen_name = format!("krata-{uuid}");
let mut gateway_mac = MacAddr6::random();
gateway_mac.set_local(true);
gateway_mac.set_multicast(false);
let mut zone_mac = MacAddr6::random();
zone_mac.set_local(true);
zone_mac.set_multicast(false);
let _launch_permit = self.launch_semaphore.acquire().await?;
let mut ip = context.ipvendor.assign(uuid).await?;
let launch_config = LaunchInfo {
root: LaunchRoot {
format: request.format.clone(),
@ -81,20 +86,15 @@ impl ZoneLauncher {
network: Some(LaunchNetwork {
link: "eth0".to_string(),
ipv4: LaunchNetworkIpv4 {
address: format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
gateway: ip.gateway_ipv4.to_string(),
address: format!("{}/{}", request.network.ipv4, request.network.ipv4_prefix),
gateway: request.network.gateway_ipv4,
},
ipv6: LaunchNetworkIpv6 {
address: format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
gateway: ip.gateway_ipv6.to_string(),
address: format!("{}/{}", request.network.ipv6, request.network.ipv6_prefix),
gateway: request.network.gateway_ipv6.to_string(),
},
resolver: LaunchNetworkResolver {
nameservers: vec![
"1.1.1.1".to_string(),
"1.0.0.1".to_string(),
"2606:4700:4700::1111".to_string(),
"2606:4700:4700::1001".to_string(),
],
nameservers: request.network.nameservers,
},
}),
env: request.env,
@ -140,13 +140,17 @@ impl ZoneLauncher {
None
};
let mut cmdline_options = ["console=hvc0"].to_vec();
if !request.debug {
if !request.kernel_verbose {
cmdline_options.push("quiet");
}
if !request.kernel_cmdline_append.is_empty() {
cmdline_options.push(&request.kernel_cmdline_append);
}
let cmdline = cmdline_options.join(" ");
let zone_mac_string = zone_mac.to_string().replace('-', ":");
let gateway_mac_string = gateway_mac.to_string().replace('-', ":");
let zone_mac_string = request.network.zone_mac.to_string().replace('-', ":");
let mut disks = vec![
DomainDisk {
@ -190,30 +194,6 @@ impl ZoneLauncher {
let mut extra_keys = vec![
("krata/uuid".to_string(), uuid.to_string()),
("krata/loops".to_string(), loops.join(",")),
(
"krata/network/zone/ipv4".to_string(),
format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
),
(
"krata/network/zone/ipv6".to_string(),
format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
),
(
"krata/network/zone/mac".to_string(),
zone_mac_string.clone(),
),
(
"krata/network/gateway/ipv4".to_string(),
format!("{}/{}", ip.gateway_ipv4, ip.ipv4_prefix),
),
(
"krata/network/gateway/ipv6".to_string(),
format!("{}/{}", ip.gateway_ipv6, ip.ipv6_prefix),
),
(
"krata/network/gateway/mac".to_string(),
gateway_mac_string.clone(),
),
];
if let Some(name) = request.name.as_ref() {
@ -222,8 +202,10 @@ impl ZoneLauncher {
let config = DomainConfig {
base: BaseDomainConfig {
max_vcpus: request.vcpus,
mem_mb: request.mem,
max_vcpus: request.max_cpus,
target_vcpus: request.target_cpus,
max_mem_mb: request.max_memory,
target_mem_mb: request.target_memory,
kernel: request.kernel,
initrd: request.initrd,
cmdline,
@ -251,29 +233,14 @@ impl ZoneLauncher {
extra_rw_paths: vec!["krata/zone".to_string()],
};
match context.xen.create(&config).await {
Ok(created) => {
ip.commit().await?;
Ok(ZoneInfo {
Ok(created) => Ok(ZoneInfo {
name: request.name.as_ref().map(|x| x.to_string()),
uuid,
domid: created.domid,
image: request.image.digest,
loops: vec![],
zone_ipv4: Some(IpNetwork::new(IpAddr::V4(ip.ipv4), ip.ipv4_prefix)?),
zone_ipv6: Some(IpNetwork::new(IpAddr::V6(ip.ipv6), ip.ipv6_prefix)?),
zone_mac: Some(zone_mac_string.clone()),
gateway_ipv4: Some(IpNetwork::new(
IpAddr::V4(ip.gateway_ipv4),
ip.ipv4_prefix,
)?),
gateway_ipv6: Some(IpNetwork::new(
IpAddr::V6(ip.gateway_ipv6),
ip.ipv6_prefix,
)?),
gateway_mac: Some(gateway_mac_string.clone()),
state: ZoneState { exit_code: None },
})
}
}),
Err(error) => {
let _ = context.autoloop.unloop(&image_squashfs_loop.path).await;
let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path).await;

View File

@ -1,13 +1,12 @@
use std::{fs, net::Ipv4Addr, path::PathBuf, str::FromStr, sync::Arc};
use anyhow::{anyhow, Result};
use ip::IpVendor;
use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network};
use krataloopdev::LoopControl;
use log::error;
use log::debug;
use std::{fs, path::PathBuf, str::FromStr, sync::Arc};
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::XenClient;
use xenplatform::domain::XEN_EXTRA_MEMORY_KB;
use xenstore::{XsdClient, XsdInterface};
use self::{
@ -19,7 +18,6 @@ use self::{
pub mod autoloop;
pub mod cfgblk;
pub mod channel;
pub mod ip;
pub mod launch;
pub mod power;
@ -48,12 +46,6 @@ pub struct ZoneInfo {
pub domid: u32,
pub image: String,
pub loops: Vec<ZoneLoopInfo>,
pub zone_ipv4: Option<IpNetwork>,
pub zone_ipv6: Option<IpNetwork>,
pub zone_mac: Option<String>,
pub gateway_ipv4: Option<IpNetwork>,
pub gateway_ipv6: Option<IpNetwork>,
pub gateway_mac: Option<String>,
pub state: ZoneState,
}
@ -61,20 +53,14 @@ pub struct ZoneInfo {
pub struct RuntimeContext {
pub autoloop: AutoLoop,
pub xen: XenClient<RuntimePlatform>,
pub ipvendor: IpVendor,
}
impl RuntimeContext {
pub async fn new(host_uuid: Uuid) -> Result<Self> {
pub async fn new() -> Result<Self> {
let xen = XenClient::new(0, RuntimePlatform::new()).await?;
let ipv4_network = Ipv4Network::new(Ipv4Addr::new(10, 75, 80, 0), 24)?;
let ipv6_network = Ipv6Network::from_str("fdd4:1476:6c7e::/48")?;
let ipvend =
IpVendor::new(xen.store.clone(), host_uuid, ipv4_network, ipv6_network).await?;
Ok(RuntimeContext {
autoloop: AutoLoop::new(LoopControl::open()?),
xen,
ipvendor: ipvend,
})
}
@ -115,61 +101,6 @@ impl RuntimeContext {
.store
.read_string(&format!("{}/krata/loops", &dom_path))
.await?;
let zone_ipv4 = self
.xen
.store
.read_string(&format!("{}/krata/network/zone/ipv4", &dom_path))
.await?;
let zone_ipv6 = self
.xen
.store
.read_string(&format!("{}/krata/network/zone/ipv6", &dom_path))
.await?;
let zone_mac = self
.xen
.store
.read_string(&format!("{}/krata/network/zone/mac", &dom_path))
.await?;
let gateway_ipv4 = self
.xen
.store
.read_string(&format!("{}/krata/network/gateway/ipv4", &dom_path))
.await?;
let gateway_ipv6 = self
.xen
.store
.read_string(&format!("{}/krata/network/gateway/ipv6", &dom_path))
.await?;
let gateway_mac = self
.xen
.store
.read_string(&format!("{}/krata/network/gateway/mac", &dom_path))
.await?;
let zone_ipv4 = if let Some(zone_ipv4) = zone_ipv4 {
IpNetwork::from_str(&zone_ipv4).ok()
} else {
None
};
let zone_ipv6 = if let Some(zone_ipv6) = zone_ipv6 {
IpNetwork::from_str(&zone_ipv6).ok()
} else {
None
};
let gateway_ipv4 = if let Some(gateway_ipv4) = gateway_ipv4 {
IpNetwork::from_str(&gateway_ipv4).ok()
} else {
None
};
let gateway_ipv6 = if let Some(gateway_ipv6) = gateway_ipv6 {
IpNetwork::from_str(&gateway_ipv6).ok()
} else {
None
};
let exit_code = self
.xen
.store
@ -190,12 +121,6 @@ impl RuntimeContext {
domid,
image,
loops,
zone_ipv4,
zone_ipv6,
zone_mac,
gateway_ipv4,
gateway_ipv6,
gateway_mac,
state,
});
}
@ -237,16 +162,21 @@ impl RuntimeContext {
#[derive(Clone)]
pub struct Runtime {
host_uuid: Uuid,
context: RuntimeContext,
launch_semaphore: Arc<Semaphore>,
}
impl Runtime {
pub async fn new(host_uuid: Uuid) -> Result<Self> {
let context = RuntimeContext::new(host_uuid).await?;
pub async fn new() -> Result<Self> {
let context = RuntimeContext::new().await?;
debug!("testing for hypervisor presence");
context
.xen
.call
.get_version_capabilities()
.await
.map_err(|_| anyhow!("hypervisor is not present"))?;
Ok(Self {
host_uuid,
context,
launch_semaphore: Arc::new(Semaphore::new(10)),
})
@ -282,11 +212,6 @@ impl Runtime {
return Err(anyhow!("unable to find krata uuid based on the domain",));
}
let uuid = Uuid::parse_str(&uuid)?;
let ip = self
.context
.ipvendor
.read_domain_assignment(uuid, domid)
.await?;
let loops = store
.read_string(format!("{}/krata/loops", dom_path).as_str())
.await?;
@ -306,29 +231,90 @@ impl Runtime {
}
}
}
if let Some(ip) = ip {
if let Err(error) = self.context.ipvendor.recall(&ip).await {
error!(
"failed to recall ip assignment for zone {}: {}",
uuid, error
);
}
}
Ok(uuid)
}
pub async fn set_memory_resources(
&self,
domid: u32,
target_memory_bytes: u64,
max_memory_bytes: u64,
) -> Result<()> {
let mut max_memory_bytes = max_memory_bytes + (XEN_EXTRA_MEMORY_KB * 1024);
if target_memory_bytes > max_memory_bytes {
max_memory_bytes = target_memory_bytes + (XEN_EXTRA_MEMORY_KB * 1024);
}
self.context
.xen
.call
.set_max_mem(domid, max_memory_bytes / 1024)
.await?;
let domain_path = self.context.xen.store.get_domain_path(domid).await?;
let tx = self.context.xen.store.transaction().await?;
let max_memory_path = format!("{}/memory/static-max", domain_path);
tx.write_string(max_memory_path, &(max_memory_bytes / 1024).to_string())
.await?;
let target_memory_path = format!("{}/memory/target", domain_path);
tx.write_string(
target_memory_path,
&(target_memory_bytes / 1024).to_string(),
)
.await?;
tx.commit().await?;
Ok(())
}
pub async fn set_cpu_resources(&self, domid: u32, target_cpus: u32) -> Result<()> {
let domain_path = self.context.xen.store.get_domain_path(domid).await?;
let cpus = self
.context
.xen
.store
.list(&format!("{}/cpu", domain_path))
.await?;
let tx = self.context.xen.store.transaction().await?;
for cpu in cpus {
let Some(id) = cpu.parse::<u32>().ok() else {
continue;
};
let available = if id >= target_cpus {
"offline"
} else {
"online"
};
tx.write_string(
format!("{}/cpu/{}/availability", domain_path, id),
available,
)
.await?;
}
tx.commit().await?;
Ok(())
}
pub async fn list(&self) -> Result<Vec<ZoneInfo>> {
self.context.list().await
}
pub async fn dupe(&self) -> Result<Runtime> {
Runtime::new(self.host_uuid).await
Runtime::new().await
}
pub async fn power_management_context(&self) -> Result<PowerManagementContext> {
let context = RuntimeContext::new(self.host_uuid).await?;
let context = RuntimeContext::new().await?;
Ok(PowerManagementContext { context })
}
pub async fn read_hypervisor_console(&self, clear: bool) -> Result<Arc<str>> {
let index = 0_u32;
let (rawbuf, newindex) = self
.context
.xen
.call
.read_console_ring_raw(clear, index)
.await?;
let buf = std::str::from_utf8(&rawbuf[..newindex as usize])?;
Ok(Arc::from(buf))
}
}

View File

@ -1,5 +1,6 @@
use anyhow::Result;
use indexmap::IndexMap;
use log::info;
use xencall::sys::{CpuId, SysctlCputopo};
use crate::RuntimeContext;
@ -151,7 +152,10 @@ impl PowerManagementContext {
.xen
.call
.set_turbo_mode(CpuId::All, enable)
.await?;
.await
.unwrap_or_else(|error| {
info!("non-fatal error while setting SMT policy: {:?}", error);
});
Ok(())
}
@ -161,7 +165,13 @@ impl PowerManagementContext {
.xen
.call
.set_cpufreq_gov(CpuId::All, policy)
.await?;
.await
.unwrap_or_else(|error| {
info!(
"non-fatal error while setting scheduler policy: {:?}",
error
);
});
Ok(())
}
}

View File

@ -0,0 +1,18 @@
use xencall::error::Result;
use xencall::XenCall;
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();
let call = XenCall::open(0)?;
let index = 0_u32;
let (buf, newindex) = call.read_console_ring_raw(false, index).await?;
match std::str::from_utf8(&buf[..newindex as usize]) {
Ok(v) => print!("{}", v),
_ => panic!("unable to decode Xen console messages"),
};
Ok(())
}

View File

@ -26,12 +26,12 @@ use std::sync::Arc;
use std::time::Duration;
use sys::{
CpuId, E820Entry, ForeignMemoryMap, PhysdevMapPirq, Sysctl, SysctlCputopo, SysctlCputopoinfo,
SysctlPhysinfo, SysctlPmOp, SysctlPmOpValue, SysctlSetCpuFreqGov, SysctlValue,
VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP, HYPERVISOR_SYSCTL, PHYSDEVOP_MAP_PIRQ,
SysctlPhysinfo, SysctlPmOp, SysctlPmOpValue, SysctlReadconsole, SysctlSetCpuFreqGov,
SysctlValue, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP, HYPERVISOR_SYSCTL, PHYSDEVOP_MAP_PIRQ,
XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION, XEN_MEM_SET_MEMORY_MAP,
XEN_SYSCTL_CPUTOPOINFO, XEN_SYSCTL_MAX_INTERFACE_VERSION, XEN_SYSCTL_MIN_INTERFACE_VERSION,
XEN_SYSCTL_PHYSINFO, XEN_SYSCTL_PM_OP, XEN_SYSCTL_PM_OP_DISABLE_TURBO,
XEN_SYSCTL_PM_OP_ENABLE_TURBO,
XEN_SYSCTL_PM_OP_ENABLE_TURBO, XEN_SYSCTL_PM_OP_SET_CPUFREQ_GOV, XEN_SYSCTL_READCONSOLE,
};
use tokio::sync::Semaphore;
use tokio::time::sleep;
@ -1038,7 +1038,7 @@ impl XenCall {
interface_version: self.sysctl_interface_version,
value: SysctlValue {
pm_op: SysctlPmOp {
cmd: XEN_SYSCTL_PM_OP_ENABLE_TURBO,
cmd: XEN_SYSCTL_PM_OP_SET_CPUFREQ_GOV,
cpuid,
value: SysctlPmOpValue {
set_gov: SysctlSetCpuFreqGov { scaling_governor },
@ -1087,4 +1087,33 @@ impl XenCall {
.await?;
Ok(())
}
pub async fn read_console_ring_raw(
&self,
clear: bool,
index: u32,
) -> Result<([u8; 16384], u32)> {
let mut u8buf = [0u8; 16384];
let mut sysctl = Sysctl {
cmd: XEN_SYSCTL_READCONSOLE,
interface_version: self.sysctl_interface_version,
value: SysctlValue {
console: SysctlReadconsole {
clear: clear as u8,
incremental: 1,
pad: 0,
index,
buffer: addr_of_mut!(u8buf) as u64,
count: 16384,
},
},
};
self.hypercall1(HYPERVISOR_SYSCTL, addr_of_mut!(sysctl) as c_ulong)
.await?;
// Safety: We are passing a SysctlReadconsole struct as part of the hypercall, and
// calling the hypercall is known to not change the underlying value outside changing
// the values on some SysctlReadconsole fields.
let newindex = unsafe { sysctl.value.console.index };
Ok((u8buf, newindex))
}
}

View File

@ -752,6 +752,7 @@ pub struct SysctlCputopoinfo {
#[repr(C)]
pub union SysctlValue {
pub console: SysctlReadconsole,
pub cputopoinfo: SysctlCputopoinfo,
pub pm_op: SysctlPmOp,
pub phys_info: SysctlPhysinfo,
@ -765,12 +766,14 @@ pub struct Sysctl {
pub value: SysctlValue,
}
pub const XEN_SYSCTL_READCONSOLE: u32 = 1;
pub const XEN_SYSCTL_PHYSINFO: u32 = 3;
pub const XEN_SYSCTL_PM_OP: u32 = 12;
pub const XEN_SYSCTL_CPUTOPOINFO: u32 = 16;
pub const XEN_SYSCTL_MIN_INTERFACE_VERSION: u32 = 0x00000015;
pub const XEN_SYSCTL_MAX_INTERFACE_VERSION: u32 = 0x00000020;
pub const XEN_SYSCTL_PM_OP_SET_CPUFREQ_GOV: u32 = 0x12;
pub const XEN_SYSCTL_PM_OP_SET_SCHED_OPT_STMT: u32 = 0x21;
pub const XEN_SYSCTL_PM_OP_ENABLE_TURBO: u32 = 0x26;
pub const XEN_SYSCTL_PM_OP_DISABLE_TURBO: u32 = 0x27;
@ -801,3 +804,14 @@ pub struct SysctlPhysinfo {
pub max_mfn: u64,
pub hw_cap: [u32; 8],
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
pub struct SysctlReadconsole {
pub clear: u8,
pub incremental: u8,
pub pad: u16,
pub index: u32,
pub buffer: u64,
pub count: u32,
}

View File

@ -13,9 +13,9 @@ async-trait = { workspace = true }
indexmap = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.14" }
krata-xenplatform = { path = "../xenplatform", version = "^0.0.14" }
krata-xenstore = { path = "../xenstore", version = "^0.0.14" }
krata-xencall = { path = "../xencall", version = "^0.0.20" }
krata-xenplatform = { path = "../xenplatform", version = "^0.0.20" }
krata-xenstore = { path = "../xenstore", version = "^0.0.20" }
regex = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }

View File

@ -27,7 +27,9 @@ async fn main() -> Result<()> {
base: BaseDomainConfig {
uuid: Uuid::new_v4(),
max_vcpus: 1,
mem_mb: 512,
target_vcpus: 1,
max_mem_mb: 512,
target_mem_mb: 512,
enable_iommu: true,
kernel: fs::read(&kernel_image_path).await?,
initrd: fs::read(&initrd_path).await?,

View File

@ -130,8 +130,7 @@ impl<P: BootSetupPlatform> XenClient<P> {
match self.init(created.domid, config, &created).await {
Ok(_) => Ok(created),
Err(err) => {
// ignore since destroying a domain is best
// effort when an error occurs
// ignore since destroying a domain is best-effort when an error occurs
let _ = self.domain_manager.destroy(created.domid).await;
Err(err)
}

View File

@ -156,13 +156,13 @@ impl ClientTransaction {
self.tx
.write_string(
format!("{}/memory/static-max", self.dom_path).as_str(),
&(base.mem_mb * 1024).to_string(),
&(base.max_mem_mb * 1024).to_string(),
)
.await?;
self.tx
.write_string(
format!("{}/memory/target", self.dom_path).as_str(),
&(base.mem_mb * 1024).to_string(),
&(base.target_mem_mb * 1024).to_string(),
)
.await?;
self.tx
@ -194,7 +194,16 @@ impl ClientTransaction {
self.tx.mkdir(&path).await?;
self.tx.set_perms(&path, ro_perm).await?;
let path = format!("{}/cpu/{}/availability", self.dom_path, i);
self.tx.write_string(&path, "online").await?;
self.tx
.write_string(
&path,
if i < base.target_vcpus {
"online"
} else {
"offline"
},
)
.await?;
self.tx.set_perms(&path, ro_perm).await?;
}
Ok(())

View File

@ -9,6 +9,7 @@ edition = "2021"
resolver = "2"
[dependencies]
byteorder = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
thiserror = { workspace = true }

View File

@ -1,9 +1,9 @@
use xenevtchn::error::Result;
use xenevtchn::EventChannel;
use xenevtchn::EventChannelService;
#[tokio::main]
async fn main() -> Result<()> {
let channel = EventChannel::open().await?;
let channel = EventChannelService::open().await?;
println!("channel opened");
let port = channel.bind_unbound_port(0).await?;
println!("port: {}", port);

View File

@ -8,6 +8,10 @@ pub enum Error {
Io(#[from] io::Error),
#[error("failed to send event channel wake: {0}")]
WakeSend(tokio::sync::broadcast::error::SendError<u32>),
#[error("failed to acquire lock")]
LockAcquireFailed,
#[error("event port already in use")]
PortInUse,
}
pub type Result<T> = std::result::Result<T, Error>;

View File

@ -1,82 +1,77 @@
pub mod error;
pub mod raw;
pub mod sys;
use crate::error::{Error, Result};
use crate::sys::{BindInterdomain, BindUnboundPort, BindVirq, Notify, UnbindPort};
use crate::raw::EVENT_CHANNEL_DEVICE;
use byteorder::{LittleEndian, ReadBytesExt};
use log::error;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::mem::size_of;
use std::os::fd::AsRawFd;
use std::os::raw::c_void;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::fs::{File, OpenOptions};
use tokio::io::AsyncReadExt;
use tokio::select;
use tokio::sync::broadcast::{
channel as broadcast_channel, Receiver as BroadcastReceiver, Sender as BroadastSender,
};
use tokio::sync::mpsc::{channel, Receiver, Sender};
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use tokio::sync::{Mutex, RwLock};
const UNBIND_CHANNEL_QUEUE_LEN: usize = 30;
const UNMASK_CHANNEL_QUEUE_LEN: usize = 30;
const BROADCAST_CHANNEL_QUEUE_LEN: usize = 30;
const CHANNEL_QUEUE_LEN: usize = 30;
type WakeMap = Arc<Mutex<HashMap<u32, BroadastSender<u32>>>>;
type WakeMap = Arc<RwLock<HashMap<u32, Sender<u32>>>>;
#[derive(Clone)]
pub struct EventChannel {
pub struct EventChannelService {
handle: Arc<Mutex<File>>,
wakes: WakeMap,
unbind_sender: Sender<u32>,
unmask_sender: Sender<u32>,
task: Arc<JoinHandle<()>>,
process_flag: Arc<AtomicBool>,
}
pub struct BoundEventChannel {
pub local_port: u32,
pub receiver: BroadcastReceiver<u32>,
unbind_sender: Sender<u32>,
pub unmask_sender: Sender<u32>,
pub receiver: Receiver<u32>,
pub service: EventChannelService,
}
impl BoundEventChannel {
pub async fn unmask(&self) -> Result<()> {
self.service.unmask(self.local_port).await
}
}
impl Drop for BoundEventChannel {
fn drop(&mut self) {
let _ = self.unbind_sender.try_send(self.local_port);
let service = self.service.clone();
let port = self.local_port;
tokio::task::spawn(async move {
let _ = service.unbind(port).await;
});
}
}
impl EventChannel {
pub async fn open() -> Result<EventChannel> {
let file = OpenOptions::new()
impl EventChannelService {
pub async fn open() -> Result<EventChannelService> {
let handle = OpenOptions::new()
.read(true)
.write(true)
.open("/dev/xen/evtchn")
.open(EVENT_CHANNEL_DEVICE)
.await?;
let wakes = Arc::new(Mutex::new(HashMap::new()));
let (unbind_sender, unbind_receiver) = channel(UNBIND_CHANNEL_QUEUE_LEN);
let (unmask_sender, unmask_receiver) = channel(UNMASK_CHANNEL_QUEUE_LEN);
let task = {
let file = file.try_clone().await?;
let wakes = wakes.clone();
tokio::task::spawn(async move {
if let Err(error) =
EventChannel::process(file, wakes, unmask_receiver, unbind_receiver).await
{
error!("event channel processor failed: {}", error);
}
})
let wakes = Arc::new(RwLock::new(HashMap::new()));
let flag = Arc::new(AtomicBool::new(false));
let processor = EventChannelProcessor {
flag: flag.clone(),
handle: handle.try_clone().await?.into_std().await,
wakes: wakes.clone(),
};
Ok(EventChannel {
handle: Arc::new(Mutex::new(file)),
processor.launch()?;
Ok(EventChannelService {
handle: Arc::new(Mutex::new(handle)),
wakes,
unbind_sender,
unmask_sender,
task: Arc::new(task),
process_flag: flag,
})
}
@ -109,11 +104,29 @@ impl EventChannel {
}
}
pub async fn unmask(&self, port: u32) -> Result<()> {
let handle = self.handle.lock().await;
let mut port = port;
let result = unsafe {
libc::write(
handle.as_raw_fd(),
&mut port as *mut u32 as *mut c_void,
size_of::<u32>(),
)
};
if result != size_of::<u32>() as isize {
return Err(Error::Io(std::io::Error::from_raw_os_error(result as i32)));
}
Ok(())
}
pub async fn unbind(&self, port: u32) -> Result<u32> {
let handle = self.handle.lock().await;
unsafe {
let mut request = UnbindPort { port };
Ok(sys::unbind(handle.as_raw_fd(), &mut request)? as u32)
let result = sys::unbind(handle.as_raw_fd(), &mut request)? as u32;
self.wakes.write().await.remove(&port);
Ok(result)
}
}
@ -132,95 +145,66 @@ impl EventChannel {
pub async fn bind(&self, domid: u32, port: u32) -> Result<BoundEventChannel> {
let local_port = self.bind_interdomain(domid, port).await?;
let (receiver, unmask_sender) = self.subscribe(local_port).await?;
let receiver = self.subscribe(local_port).await?;
let bound = BoundEventChannel {
local_port,
receiver,
unbind_sender: self.unbind_sender.clone(),
unmask_sender,
service: self.clone(),
};
Ok(bound)
}
pub async fn subscribe(&self, port: u32) -> Result<(BroadcastReceiver<u32>, Sender<u32>)> {
let mut wakes = self.wakes.lock().await;
pub async fn subscribe(&self, port: u32) -> Result<Receiver<u32>> {
let mut wakes = self.wakes.write().await;
let receiver = match wakes.entry(port) {
Entry::Occupied(entry) => entry.get().subscribe(),
Entry::Occupied(_) => {
return Err(Error::PortInUse);
}
Entry::Vacant(entry) => {
let (sender, receiver) = broadcast_channel::<u32>(BROADCAST_CHANNEL_QUEUE_LEN);
let (sender, receiver) = channel::<u32>(CHANNEL_QUEUE_LEN);
entry.insert(sender);
receiver
}
};
Ok((receiver, self.unmask_sender.clone()))
Ok(receiver)
}
}
async fn process(
mut file: File,
wakers: WakeMap,
mut unmask_receiver: Receiver<u32>,
mut unbind_receiver: Receiver<u32>,
) -> Result<()> {
loop {
select! {
result = file.read_u32_le() => {
match result {
Ok(port) => {
if let Some(sender) = wakers.lock().await.get(&port) {
if let Err(error) = sender.send(port) {
return Err(Error::WakeSend(error));
}
}
}
pub struct EventChannelProcessor {
flag: Arc<AtomicBool>,
handle: std::fs::File,
wakes: WakeMap,
}
Err(error) => return Err(Error::Io(error))
}
}
result = unmask_receiver.recv() => {
match result {
Some(port) => {
unsafe {
let mut port = port;
let result = libc::write(file.as_raw_fd(), &mut port as *mut u32 as *mut c_void, size_of::<u32>());
if result != size_of::<u32>() as isize {
return Err(Error::Io(std::io::Error::from_raw_os_error(result as i32)));
}
}
}
None => {
impl EventChannelProcessor {
pub fn launch(mut self) -> Result<()> {
std::thread::spawn(move || {
while let Err(error) = self.process() {
if self.flag.load(Ordering::Acquire) {
break;
}
error!("failed to process event channel wakes: {}", error);
}
}
result = unbind_receiver.recv() => {
match result {
Some(port) => {
unsafe {
let mut request = UnbindPort { port };
sys::unbind(file.as_raw_fd(), &mut request)?;
}
}
None => {
break;
}
}
}
};
}
});
Ok(())
}
pub fn process(&mut self) -> Result<()> {
loop {
let port = self.handle.read_u32::<LittleEndian>()?;
if let Some(wake) = self.wakes.blocking_read().get(&port) {
let _ = wake.try_send(port);
}
}
}
}
impl Drop for EventChannel {
impl Drop for EventChannelService {
fn drop(&mut self) {
if Arc::strong_count(&self.task) <= 1 {
self.task.abort();
if Arc::strong_count(&self.handle) <= 1 {
self.process_flag.store(true, Ordering::Release);
}
}
}

View File

@ -0,0 +1,84 @@
use std::fs::{File, OpenOptions};
use std::os::fd::AsRawFd;
use std::sync::{Arc, Mutex};
use byteorder::{LittleEndian, ReadBytesExt};
use crate::error::{Error, Result};
use crate::sys;
pub const EVENT_CHANNEL_DEVICE: &str = "/dev/xen/evtchn";
#[derive(Clone)]
pub struct RawEventChannelService {
handle: Arc<Mutex<File>>,
}
impl RawEventChannelService {
pub fn open() -> Result<RawEventChannelService> {
let handle = OpenOptions::new()
.read(true)
.write(true)
.open(EVENT_CHANNEL_DEVICE)?;
let handle = Arc::new(Mutex::new(handle));
Ok(RawEventChannelService { handle })
}
pub fn from_handle(handle: File) -> Result<RawEventChannelService> {
Ok(RawEventChannelService {
handle: Arc::new(Mutex::new(handle)),
})
}
pub fn bind_virq(&self, virq: u32) -> Result<u32> {
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
let mut request = sys::BindVirq { virq };
Ok(unsafe { sys::bind_virq(handle.as_raw_fd(), &mut request)? as u32 })
}
pub fn bind_interdomain(&self, domid: u32, port: u32) -> Result<u32> {
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
let mut request = sys::BindInterdomain {
remote_domain: domid,
remote_port: port,
};
Ok(unsafe { sys::bind_interdomain(handle.as_raw_fd(), &mut request)? as u32 })
}
pub fn bind_unbound_port(&self, domid: u32) -> Result<u32> {
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
let mut request = sys::BindUnboundPort {
remote_domain: domid,
};
Ok(unsafe { sys::bind_unbound_port(handle.as_raw_fd(), &mut request)? as u32 })
}
pub fn unbind(&self, port: u32) -> Result<u32> {
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
let mut request = sys::UnbindPort { port };
Ok(unsafe { sys::unbind(handle.as_raw_fd(), &mut request)? as u32 })
}
pub fn notify(&self, port: u32) -> Result<u32> {
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
let mut request = sys::Notify { port };
Ok(unsafe { sys::notify(handle.as_raw_fd(), &mut request)? as u32 })
}
pub fn reset(&self) -> Result<u32> {
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
Ok(unsafe { sys::reset(handle.as_raw_fd())? as u32 })
}
pub fn pending(&self) -> Result<u32> {
let mut handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
Ok(handle.read_u32::<LittleEndian>()?)
}
pub fn into_handle(self) -> Result<File> {
Arc::into_inner(self.handle)
.ok_or(Error::LockAcquireFailed)?
.into_inner()
.map_err(|_| Error::LockAcquireFailed)
}
}

View File

@ -16,7 +16,7 @@ flate2 = { workspace = true }
indexmap = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.14" }
krata-xencall = { path = "../xencall", version = "^0.0.20" }
memchr = { workspace = true }
nix = { workspace = true }
regex = { workspace = true }

View File

@ -162,11 +162,13 @@ impl<I: BootImageLoader, P: BootSetupPlatform> BootSetup<I, P> {
pub async fn initialize(
&mut self,
initrd: &[u8],
mem_mb: u64,
target_mem_mb: u64,
max_mem_mb: u64,
max_vcpus: u32,
cmdline: &str,
) -> Result<BootDomain> {
let total_pages = mem_mb << (20 - self.platform.page_shift());
let target_pages = target_mem_mb << (20 - self.platform.page_shift());
let total_pages = max_mem_mb << (20 - self.platform.page_shift());
let image_info = self.image_loader.parse(self.platform.hvm()).await?;
let mut domain = BootDomain {
domid: self.domid,
@ -175,7 +177,7 @@ impl<I: BootImageLoader, P: BootSetupPlatform> BootSetup<I, P> {
virt_pgtab_end: 0,
pfn_alloc_end: 0,
total_pages,
target_pages: total_pages,
target_pages,
page_size: self.platform.page_size(),
image_info,
console_evtchn: 0,

View File

@ -9,6 +9,8 @@ use xencall::XenCall;
use crate::error::Result;
pub const XEN_EXTRA_MEMORY_KB: u64 = 2048;
pub struct BaseDomainManager<P: BootSetupPlatform> {
call: XenCall,
pub platform: Arc<P>,
@ -29,7 +31,7 @@ impl<P: BootSetupPlatform> BaseDomainManager<P> {
let domid = self.call.create_domain(domain).await?;
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
self.call
.set_max_mem(domid, (config.mem_mb * 1024) + 2048)
.set_max_mem(domid, (config.max_mem_mb * 1024) + XEN_EXTRA_MEMORY_KB)
.await?;
let loader = ElfImageLoader::load_file_kernel(&config.kernel)?;
let platform = (*self.platform).clone();
@ -37,7 +39,8 @@ impl<P: BootSetupPlatform> BaseDomainManager<P> {
let mut domain = boot
.initialize(
&config.initrd,
config.mem_mb,
config.target_mem_mb,
config.max_mem_mb,
config.max_vcpus,
&config.cmdline,
)
@ -63,7 +66,9 @@ pub struct BaseDomainConfig {
pub uuid: Uuid,
pub owner_domid: u32,
pub max_vcpus: u32,
pub mem_mb: u64,
pub target_vcpus: u32,
pub max_mem_mb: u64,
pub target_mem_mb: u64,
pub kernel: Vec<u8>,
pub initrd: Vec<u8>,
pub cmdline: String,

View File

@ -116,9 +116,11 @@ impl XsdSocket {
let rx_task = std::thread::Builder::new()
.name("xenstore-reader".to_string())
.spawn(move || {
if let Err(error) = XsdSocketProcessor::process_rx(read, rx_sender) {
let mut read = read;
if let Err(error) = XsdSocketProcessor::process_rx(&mut read, rx_sender) {
debug!("failed to process xen store bus: {}", error);
}
std::mem::forget(read);
})?;
Ok(XsdSocket {
@ -197,12 +199,11 @@ struct XsdSocketProcessor {
}
impl XsdSocketProcessor {
fn process_rx(mut read: std::fs::File, rx_sender: Sender<XsdMessage>) -> Result<()> {
fn process_rx(read: &mut std::fs::File, rx_sender: Sender<XsdMessage>) -> Result<()> {
let mut header_buffer: Vec<u8> = vec![0u8; XsdMessageHeader::SIZE];
let mut buffer: Vec<u8> = vec![0u8; XEN_BUS_MAX_PACKET_SIZE - XsdMessageHeader::SIZE];
loop {
let message =
XsdSocketProcessor::read_message(&mut header_buffer, &mut buffer, &mut read)?;
let message = XsdSocketProcessor::read_message(&mut header_buffer, &mut buffer, read)?;
rx_sender.blocking_send(message)?;
}
}
@ -297,7 +298,7 @@ impl XsdSocketProcessor {
break;
}
}
};
}
}
Ok(())
}

Some files were not shown because too many files have changed in this diff Show More