Compare commits

...

33 Commits

Author SHA1 Message Date
Ann Wallace
a58bb3b0fb
added links in the tech overview (#475) 2025-07-03 20:34:27 +00:00
Ann Wallace
699c2e09ef
added a technical overview and added a link in the readme (#474) 2025-07-03 14:46:02 +00:00
stepsecurity-app[bot]
53052b24a0
[StepSecurity] Apply security best practices (#473)
Signed-off-by: StepSecurity Bot <bot@stepsecurity.io>
Co-authored-by: stepsecurity-app[bot] <188008098+stepsecurity-app[bot]@users.noreply.github.com>
2025-06-06 18:36:16 +00:00
dependabot[bot]
ea3ac96926
build(deps): bump the actions-updates group across 1 directory with 3 updates (#467)
Bumps the actions-updates group with 3 updates in the / directory: [step-security/harden-runner](https://github.com/step-security/harden-runner), [actions/create-github-app-token](https://github.com/actions/create-github-app-token) and [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action).


Updates `step-security/harden-runner` from 2.10.2 to 2.11.0
- [Release notes](https://github.com/step-security/harden-runner/releases)
- [Commits](0080882f6c...4d991eb9b9)

Updates `actions/create-github-app-token` from 1.11.0 to 1.11.6
- [Release notes](https://github.com/actions/create-github-app-token/releases)
- [Commits](5d869da34e...21cfef2b49)

Updates `MarcoIeni/release-plz-action` from 0.5.86 to 0.5.99
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](db75300cf2...476794ede1)

---
updated-dependencies:
- dependency-name: step-security/harden-runner
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions-updates
- dependency-name: actions/create-github-app-token
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-05 14:26:50 +00:00
dependabot[bot]
cc1ff65869
build(deps): bump the dep-updates group across 1 directory with 4 updates (#441)
Bumps the dep-updates group with 4 updates in the / directory: [async-trait](https://github.com/dtolnay/async-trait), [env_logger](https://github.com/rust-cli/env_logger), [libc](https://github.com/rust-lang/libc) and [thiserror](https://github.com/dtolnay/thiserror).


Updates `async-trait` from 0.1.83 to 0.1.85
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.83...0.1.85)

Updates `env_logger` from 0.11.5 to 0.11.6
- [Release notes](https://github.com/rust-cli/env_logger/releases)
- [Changelog](https://github.com/rust-cli/env_logger/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-cli/env_logger/compare/v0.11.5...v0.11.6)

Updates `libc` from 0.2.168 to 0.2.169
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.169/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.168...0.2.169)

Updates `thiserror` from 2.0.7 to 2.0.9
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.7...2.0.9)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: env_logger
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-07 06:38:16 +00:00
edera-cultivation[bot]
b7dd2b8d89
chore: release v0.0.24 (#388)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-12-14 23:53:21 +00:00
Alex Zenla
bf8a8379db
fix(xenplatform): use cfg attributes for returning supported platforms 2024-12-14 18:46:45 -05:00
Alex Zenla
e0bbeb5d64
fix(xenclient): examples should use supported platform 2024-12-14 18:33:53 -05:00
Alex Zenla
4b0f3782bd
fix(xenclient): boot example should use unsupported platform on aarch64 2024-12-14 18:22:37 -05:00
Alex Zenla
3adf9b5e88
feat(xen): update xenclient and xenplatform to the latest structure (#433) 2024-12-14 18:16:10 -05:00
dependabot[bot]
f9d4508149
build(deps): bump the actions-updates group across 1 directory with 3 updates (#427)
Bumps the actions-updates group with 3 updates in the / directory: [step-security/harden-runner](https://github.com/step-security/harden-runner), [actions/checkout](https://github.com/actions/checkout) and [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action).


Updates `step-security/harden-runner` from 2.10.1 to 2.10.2
- [Release notes](https://github.com/step-security/harden-runner/releases)
- [Commits](91182cccc0...0080882f6c)

Updates `actions/checkout` from 4.2.1 to 4.2.2
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](eef61447b9...11bd71901b)

Updates `MarcoIeni/release-plz-action` from 0.5.76 to 0.5.86
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](ede2f28fa4...db75300cf2)

---
updated-dependencies:
- dependency-name: step-security/harden-runner
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Alex Zenla <alex@edera.dev>
2024-12-14 18:10:32 -05:00
Alex Zenla
54dc9cbdaa
chore(deps): upgrade dependencies and clean code (#432) 2024-12-14 18:09:56 -05:00
Alex Zenla
ea9624955c
feat(evtchn): harden evtchn handling and improve api (#431) 2024-12-14 18:03:33 -05:00
Alex Zenla
960578efc4
feat(xencall): improve asynchronous support (#430) 2024-12-14 18:00:11 -05:00
Alex Zenla
d7affe6c8c
feat(xenstore): multi-watch and maybe-commit support (#429) 2024-12-14 17:57:15 -05:00
dependabot[bot]
a04a812f28
build(deps): bump hashbrown from 0.15.0 to 0.15.2 in the cargo group (#426)
Bumps the cargo group with 1 update: [hashbrown](https://github.com/rust-lang/hashbrown).


Updates `hashbrown` from 0.15.0 to 0.15.2
- [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/hashbrown/commits)

---
updated-dependencies:
- dependency-name: hashbrown
  dependency-type: indirect
  dependency-group: cargo
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-13 01:07:56 +00:00
dependabot[bot]
9f6383b4c5
build(deps): bump the dep-updates group across 1 directory with 4 updates (#415)
Bumps the dep-updates group with 4 updates in the / directory: [libc](https://github.com/rust-lang/libc), [regex](https://github.com/rust-lang/regex), [thiserror](https://github.com/dtolnay/thiserror) and [tokio](https://github.com/tokio-rs/tokio).


Updates `libc` from 0.2.159 to 0.2.162
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.162/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.159...0.2.162)

Updates `regex` from 1.11.0 to 1.11.1
- [Release notes](https://github.com/rust-lang/regex/releases)
- [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/regex/compare/1.11.0...1.11.1)

Updates `thiserror` from 1.0.64 to 1.0.65
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.64...1.0.65)

Updates `tokio` from 1.40.0 to 1.41.1
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.40.0...tokio-1.41.1)

---
updated-dependencies:
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: regex
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-10 11:31:15 +00:00
Alex Zenla
7f5a8c7a6e
fix(xenplatform): e820 sanitize should now produce valid mappings 2024-10-17 13:27:23 -04:00
dependabot[bot]
b1e7a05440
build(deps): bump the actions-updates group across 1 directory with 2 updates (#404)
Bumps the actions-updates group with 2 updates in the / directory: [actions/checkout](https://github.com/actions/checkout) and [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action).


Updates `actions/checkout` from 4.2.0 to 4.2.1
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](d632683dd7...eef61447b9)

Updates `MarcoIeni/release-plz-action` from 0.5.72 to 0.5.76
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](dbf3db74cd...ede2f28fa4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-16 16:10:56 +00:00
dependabot[bot]
998df55711
build(deps): bump uuid from 1.10.0 to 1.11.0 in the dep-updates group (#405)
Bumps the dep-updates group with 1 update: [uuid](https://github.com/uuid-rs/uuid).


Updates `uuid` from 1.10.0 to 1.11.0
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/1.10.0...1.11.0)

---
updated-dependencies:
- dependency-name: uuid
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-16 16:10:18 +00:00
dependabot[bot]
3138c3e3e5
build(deps): bump MarcoIeni/release-plz-action (#399)
Bumps the actions-updates group with 1 update: [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action).


Updates `MarcoIeni/release-plz-action` from 0.5.71 to 0.5.72
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](91356927c5...dbf3db74cd)

---
updated-dependencies:
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-03 21:00:36 +00:00
dependabot[bot]
d12fee305c
build(deps): bump the actions-updates group across 1 directory with 2 updates (#397)
Bumps the actions-updates group with 2 updates in the / directory: [actions/checkout](https://github.com/actions/checkout) and [MarcoIeni/release-plz-action](https://github.com/marcoieni/release-plz-action).


Updates `actions/checkout` from 4.1.7 to 4.2.0
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](692973e3d9...d632683dd7)

Updates `MarcoIeni/release-plz-action` from 0.5.65 to 0.5.71
- [Release notes](https://github.com/marcoieni/release-plz-action/releases)
- [Commits](e28810957e...91356927c5)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions-updates
- dependency-name: MarcoIeni/release-plz-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-02 15:53:34 +00:00
dependabot[bot]
66aef77b05
build(deps): bump the dep-updates group across 1 directory with 6 updates (#398)
Bumps the dep-updates group with 6 updates in the / directory:

| Package | From | To |
| --- | --- | --- |
| [async-trait](https://github.com/dtolnay/async-trait) | `0.1.82` | `0.1.83` |
| [flate2](https://github.com/rust-lang/flate2-rs) | `1.0.33` | `1.0.34` |
| [indexmap](https://github.com/indexmap-rs/indexmap) | `2.5.0` | `2.6.0` |
| [libc](https://github.com/rust-lang/libc) | `0.2.158` | `0.2.159` |
| [regex](https://github.com/rust-lang/regex) | `1.10.6` | `1.11.0` |
| [thiserror](https://github.com/dtolnay/thiserror) | `1.0.63` | `1.0.64` |



Updates `async-trait` from 0.1.82 to 0.1.83
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.82...0.1.83)

Updates `flate2` from 1.0.33 to 1.0.34
- [Release notes](https://github.com/rust-lang/flate2-rs/releases)
- [Changelog](https://github.com/rust-lang/flate2-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/flate2-rs/compare/1.0.33...1.0.34)

Updates `indexmap` from 2.5.0 to 2.6.0
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.5.0...2.6.0)

Updates `libc` from 0.2.158 to 0.2.159
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Changelog](https://github.com/rust-lang/libc/blob/0.2.159/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.158...0.2.159)

Updates `regex` from 1.10.6 to 1.11.0
- [Release notes](https://github.com/rust-lang/regex/releases)
- [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/regex/compare/1.10.6...1.11.0)

Updates `thiserror` from 1.0.63 to 1.0.64
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.63...1.0.64)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: flate2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: regex
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-02 15:53:19 +00:00
edera-cultivation[bot]
367b4e5124
chore: release (#387)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-09-17 04:19:00 +00:00
edera-cultivation[bot]
b9559525c6
chore: release (#386)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-09-17 03:37:45 +00:00
Alex Zenla
cc63412a7f
relicense to GPL v2.0 2024-09-16 02:32:59 -04:00
dependabot[bot]
aa473b1c8b
build(deps): bump the dep-updates group across 1 directory with 3 updates (#385)
Bumps the dep-updates group with 3 updates in the / directory: [async-trait](https://github.com/dtolnay/async-trait), [indexmap](https://github.com/indexmap-rs/indexmap) and [tokio](https://github.com/tokio-rs/tokio).


Updates `async-trait` from 0.1.81 to 0.1.82
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.81...0.1.82)

Updates `indexmap` from 2.4.0 to 2.5.0
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.4.0...2.5.0)

Updates `tokio` from 1.39.3 to 1.40.0
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.39.3...tokio-1.40.0)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dep-updates
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dep-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-16 06:27:26 +00:00
dependabot[bot]
7e34766bdc
build(deps): bump the actions-updates group with 2 updates (#384)
Bumps the actions-updates group with 2 updates: [step-security/harden-runner](https://github.com/step-security/harden-runner) and [actions/create-github-app-token](https://github.com/actions/create-github-app-token).


Updates `step-security/harden-runner` from 2.9.1 to 2.10.1
- [Release notes](https://github.com/step-security/harden-runner/releases)
- [Commits](5c7944e73c...91182cccc0)

Updates `actions/create-github-app-token` from 1.10.3 to 1.11.0
- [Release notes](https://github.com/actions/create-github-app-token/releases)
- [Commits](31c86eb3b3...5d869da34e)

---
updated-dependencies:
- dependency-name: step-security/harden-runner
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions-updates
- dependency-name: actions/create-github-app-token
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-16 06:27:16 +00:00
Alex Zenla
83fd7607dd
preparations for xen control-plane 2024-09-16 02:13:35 -04:00
Alex Zenla
ab0d66c611
fix release-plz workflow 2024-09-16 02:05:46 -04:00
Alex Zenla
67652a5e1b
krata xen control-plane 2024-09-16 02:03:43 -04:00
edera-cultivation[bot]
a320efad6b
chore: release (#368)
Co-authored-by: edera-cultivation[bot] <165992271+edera-cultivation[bot]@users.noreply.github.com>
2024-09-03 21:13:00 +00:00
dependabot[bot]
a61e2fb30a
build(deps): bump quinn-proto from 0.11.6 to 0.11.8 in the cargo group (#373)
Bumps the cargo group with 1 update: [quinn-proto](https://github.com/quinn-rs/quinn).


Updates `quinn-proto` from 0.11.6 to 0.11.8
- [Release notes](https://github.com/quinn-rs/quinn/releases)
- [Commits](https://github.com/quinn-rs/quinn/compare/quinn-proto-0.11.6...quinn-proto-0.11.8)

---
updated-dependencies:
- dependency-name: quinn-proto
  dependency-type: indirect
  dependency-group: cargo
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-03 20:55:54 +00:00
231 changed files with 3128 additions and 23997 deletions

View File

@@ -1,2 +0,0 @@
/target
/kernel/linux-*

View File

@@ -5,10 +5,10 @@ updates:
schedule:
interval: "daily"
groups:
dep-updates:
actions-updates:
dependency-type: "production"
applies-to: "version-updates"
dev-updates:
actions-dev-updates:
dependency-type: "development"
applies-to: "version-updates"
- package-ecosystem: "cargo"
@@ -22,14 +22,3 @@ updates:
dev-updates:
dependency-type: "development"
applies-to: "version-updates"
- package-ecosystem: "docker"
directory: "/images"
schedule:
interval: "daily"
groups:
dep-updates:
dependency-type: "production"
applies-to: "version-updates"
dev-updates:
dependency-type: "development"
applies-to: "version-updates"

View File

@@ -6,17 +6,20 @@ on:
merge_group:
branches:
- main
permissions:
contents: read
jobs:
rustfmt:
name: rustfmt
runs-on: ubuntu-latest
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: install stable rust toolchain with rustfmt
@@ -33,11 +36,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: shellcheck
@@ -55,11 +58,11 @@ jobs:
name: full build linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: install stable rust toolchain
@@ -83,11 +86,11 @@ jobs:
name: full test linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: install stable rust toolchain
@@ -110,11 +113,11 @@ jobs:
name: full clippy linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
- name: install stable rust toolchain with clippy
@@ -126,76 +129,3 @@ jobs:
run: ./hack/ci/install-linux-deps.sh
- name: cargo clippy
run: ./hack/build/cargo.sh clippy
zone-initrd:
runs-on: ubuntu-latest
strategy:
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: zone initrd linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install stable rust toolchain with ${{ matrix.arch }}-unknown-linux-gnu and ${{ matrix.arch }}-unknown-linux-musl rust targets
run: |
rustup update --no-self-update stable
rustup default stable
rustup target add ${{ matrix.arch }}-unknown-linux-gnu ${{ matrix.arch }}-unknown-linux-musl
- name: install linux dependencies
run: ./hack/ci/install-linux-deps.sh
- name: initrd build
run: ./hack/initrd/build.sh
kratactl-build:
strategy:
fail-fast: false
matrix:
platform:
- { os: linux, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: linux, arch: aarch64, on: ubuntu-latest, deps: linux }
- { os: darwin, arch: x86_64, on: macos-14, deps: darwin }
- { os: darwin, arch: aarch64, on: macos-14, deps: darwin }
- { os: freebsd, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: windows, arch: x86_64, on: windows-latest, deps: windows }
env:
TARGET_OS: "${{ matrix.platform.os }}"
TARGET_ARCH: "${{ matrix.platform.arch }}"
runs-on: "${{ matrix.platform.on }}"
name: kratactl build ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
defaults:
run:
shell: bash
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: configure git line endings
run: git config --global core.autocrlf false && git config --global core.eol lf
if: ${{ matrix.platform.os == 'windows' }}
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install stable rust toolchain
run: |
rustup update --no-self-update stable
rustup default stable
- name: install ${{ matrix.platform.arch }}-apple-darwin rust target
run: "rustup target add --toolchain stable ${{ matrix.platform.arch }}-apple-darwin"
if: ${{ matrix.platform.os == 'darwin' }}
- name: setup homebrew
uses: homebrew/actions/setup-homebrew@4b34604e75af8f8b23b454f0b5ffb7c5d8ce0056 # master
if: ${{ matrix.platform.os == 'darwin' }}
- name: install ${{ matrix.platform.deps }} dependencies
run: ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh
- name: cargo build kratactl
run: ./hack/build/cargo.sh build --bin kratactl

View File

@@ -1,165 +0,0 @@
name: nightly
on:
workflow_dispatch:
schedule:
- cron: "0 10 * * *"
permissions:
contents: read
jobs:
full-build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
CI_NEEDS_FPM: "1"
name: nightly full build linux-${{ matrix.arch }}
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install stable rust toolchain with ${{ matrix.arch }}-unknown-linux-gnu and ${{ matrix.arch }}-unknown-linux-musl rust targets
run: |
rustup update --no-self-update stable
rustup default stable
rustup target add ${{ matrix.arch }}-unknown-linux-gnu ${{ matrix.arch }}-unknown-linux-musl
- name: install linux dependencies
run: ./hack/ci/install-linux-deps.sh
- name: build systemd bundle
run: ./hack/dist/bundle.sh
- name: upload systemd bundle
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: krata-bundle-systemd-${{ matrix.arch }}
path: "target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
compression-level: 0
- name: build deb package
run: ./hack/dist/deb.sh
- name: upload deb package
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: krata-debian-${{ matrix.arch }}
path: "target/dist/*.deb"
compression-level: 0
- name: build apk package
run: ./hack/dist/apk.sh
- name: upload apk package
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: krata-alpine-${{ matrix.arch }}
path: "target/dist/*_${{ matrix.arch }}.apk"
compression-level: 0
kratactl-build:
strategy:
fail-fast: false
matrix:
platform:
- { os: linux, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: linux, arch: aarch64, on: ubuntu-latest, deps: linux }
- { os: darwin, arch: x86_64, on: macos-14, deps: darwin }
- { os: darwin, arch: aarch64, on: macos-14, deps: darwin }
- { os: freebsd, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: windows, arch: x86_64, on: windows-latest, deps: windows }
env:
TARGET_OS: "${{ matrix.platform.os }}"
TARGET_ARCH: "${{ matrix.platform.arch }}"
runs-on: "${{ matrix.platform.on }}"
name: nightly kratactl build ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
defaults:
run:
shell: bash
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: configure git line endings
run: git config --global core.autocrlf false && git config --global core.eol lf
if: ${{ matrix.platform.os == 'windows' }}
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install stable rust toolchain
run: |
rustup update --no-self-update stable
rustup default stable
- name: install ${{ matrix.platform.arch }}-apple-darwin rust target
run: "rustup target add --toolchain stable ${{ matrix.platform.arch }}-apple-darwin"
if: ${{ matrix.platform.os == 'darwin' }}
- name: setup homebrew
uses: homebrew/actions/setup-homebrew@4b34604e75af8f8b23b454f0b5ffb7c5d8ce0056 # master
if: ${{ matrix.platform.os == 'darwin' }}
- name: install ${{ matrix.platform.deps }} dependencies
run: ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh
- name: cargo build kratactl
run: ./hack/build/cargo.sh build --release --bin kratactl
- name: upload kratactl
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl"
if: ${{ matrix.platform.os != 'windows' }}
- name: upload kratactl
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl.exe"
if: ${{ matrix.platform.os == 'windows' }}
oci-build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
component:
- kratactl
- kratad
- kratanet
- krata-zone
name: nightly oci build ${{ matrix.component }}
permissions:
contents: read
id-token: write
packages: write
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install cosign
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0
- name: setup docker buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- name: login to container registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
with:
registry: ghcr.io
username: "${{ github.actor }}"
password: "${{ secrets.GITHUB_TOKEN }}"
- name: docker build and push ${{ matrix.component }}
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
id: push
with:
file: ./images/Dockerfile.${{ matrix.component }}
platforms: linux/amd64,linux/aarch64
tags: "ghcr.io/edera-dev/${{ matrix.component }}:nightly"
push: true
- name: cosign sign ${{ matrix.component }}
run: cosign sign --yes "${TAGS}@${DIGEST}"
env:
DIGEST: "${{ steps.push.outputs.digest }}"
TAGS: "ghcr.io/edera-dev/${{ matrix.component }}:nightly"
COSIGN_EXPERIMENTAL: "true"

View File

@@ -1,172 +0,0 @@
name: release-assets
on:
release:
types:
- published
env:
CARGO_INCREMENTAL: 0
CARGO_NET_GIT_FETCH_WITH_CLI: true
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
RUSTUP_MAX_RETRIES: 10
jobs:
services:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
CI_NEEDS_FPM: "1"
name: release-assets services ${{ matrix.arch }}
permissions:
contents: write
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install stable rust toolchain with ${{ matrix.arch }}-unknown-linux-gnu and ${{ matrix.arch }}-unknown-linux-musl rust targets
run: |
rustup update --no-self-update stable
rustup default stable
rustup target add ${{ matrix.arch }}-unknown-linux-gnu ${{ matrix.arch }}-unknown-linux-musl
- name: install linux dependencies
run: ./hack/ci/install-linux-deps.sh
- name: build systemd bundle
run: ./hack/dist/bundle.sh
- name: assemble systemd bundle
run: "./hack/ci/assemble-release-assets.sh bundle-systemd ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
- name: build deb package
run: ./hack/dist/deb.sh
- name: assemble deb package
run: "./hack/ci/assemble-release-assets.sh debian ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/*.deb"
- name: build apk package
run: ./hack/dist/apk.sh
- name: assemble apk package
run: "./hack/ci/assemble-release-assets.sh alpine ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/*_${{ matrix.arch }}.apk"
- name: upload release artifacts
run: "./hack/ci/upload-release-assets.sh ${{ github.event.release.tag_name }}"
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
kratactl:
strategy:
fail-fast: false
matrix:
platform:
- { os: linux, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: linux, arch: aarch64, on: ubuntu-latest, deps: linux }
- { os: darwin, arch: x86_64, on: macos-14, deps: darwin }
- { os: darwin, arch: aarch64, on: macos-14, deps: darwin }
- { os: freebsd, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: windows, arch: x86_64, on: windows-latest, deps: windows }
env:
TARGET_OS: "${{ matrix.platform.os }}"
TARGET_ARCH: "${{ matrix.platform.arch }}"
runs-on: "${{ matrix.platform.on }}"
name: release-assets kratactl ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
defaults:
run:
shell: bash
timeout-minutes: 60
permissions:
contents: write
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install stable rust toolchain
run: |
rustup update --no-self-update stable
rustup default stable
- name: install ${{ matrix.platform.arch }}-apple-darwin rust target
run: "rustup target add --toolchain stable ${{ matrix.platform.arch }}-apple-darwin"
if: ${{ matrix.platform.os == 'darwin' }}
- name: setup homebrew
uses: homebrew/actions/setup-homebrew@4b34604e75af8f8b23b454f0b5ffb7c5d8ce0056 # master
if: ${{ matrix.platform.os == 'darwin' }}
- name: install ${{ matrix.platform.deps }} dependencies
run: ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh
- name: cargo build kratactl
run: ./hack/build/cargo.sh build --release --bin kratactl
- name: assemble kratactl executable
run: "./hack/ci/assemble-release-assets.sh kratactl ${{ github.event.release.tag_name }} ${{ matrix.platform.os }}-${{ matrix.platform.arch }} target/*/release/kratactl"
if: ${{ matrix.platform.os != 'windows' }}
- name: assemble kratactl executable
run: "./hack/ci/assemble-release-assets.sh kratactl ${{ github.event.release.tag_name }} ${{ matrix.platform.os }}-${{ matrix.platform.arch }} target/*/release/kratactl.exe"
if: ${{ matrix.platform.os == 'windows' }}
- name: upload release artifacts
run: "./hack/ci/upload-release-assets.sh ${{ github.event.release.tag_name }}"
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
oci:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
component:
- kratactl
- kratad
- kratanet
- krata-zone
name: release-assets oci ${{ matrix.component }}
permissions:
contents: read
id-token: write
packages: write
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
with:
egress-policy: audit
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
submodules: recursive
- name: install cosign
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0
- name: setup docker buildx
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- name: login to container registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
with:
registry: ghcr.io
username: "${{ github.actor }}"
password: "${{ secrets.GITHUB_TOKEN }}"
- name: capture krata version
id: version
run: |
echo "KRATA_VERSION=$(./hack/dist/version.sh)" >> "${GITHUB_OUTPUT}"
- name: docker build and push ${{ matrix.component }}
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
id: push
with:
file: ./images/Dockerfile.${{ matrix.component }}
platforms: linux/amd64,linux/aarch64
tags: "ghcr.io/edera-dev/${{ matrix.component }}:${{ steps.version.outputs.KRATA_VERSION }},ghcr.io/edera-dev/${{ matrix.component }}:latest"
push: true
- name: cosign sign ${{ matrix.component }}:${{ steps.version.outputs.KRATA_VERSION }}
run: cosign sign --yes "${TAGS}@${DIGEST}"
env:
DIGEST: "${{ steps.push.outputs.digest }}"
TAGS: "ghcr.io/edera-dev/${{ matrix.component }}:${{ steps.version.outputs.KRATA_VERSION }}"
COSIGN_EXPERIMENTAL: "true"
- name: cosign sign ${{ matrix.component }}:latest
run: cosign sign --yes "${TAGS}@${DIGEST}"
env:
DIGEST: "${{ steps.push.outputs.digest }}"
TAGS: "ghcr.io/edera-dev/${{ matrix.component }}:latest"
COSIGN_EXPERIMENTAL: "true"

View File

@@ -6,6 +6,9 @@ on:
concurrency:
group: "${{ github.workflow }}"
cancel-in-progress: true
permissions:
contents: read
jobs:
release-plz:
name: release-plz
@@ -15,17 +18,17 @@ jobs:
contents: write
steps:
- name: harden runner
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
with:
egress-policy: audit
- name: generate cultivator token
uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3
uses: actions/create-github-app-token@21cfef2b496dd8ef5b904c159339626a10ad380e # v1.11.6
id: generate-token
with:
app-id: "${{ secrets.EDERA_CULTIVATION_APP_ID }}"
private-key: "${{ secrets.EDERA_CULTIVATION_APP_PRIVATE_KEY }}"
- name: checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
fetch-depth: 0
@@ -37,7 +40,7 @@ jobs:
- name: install linux dependencies
run: ./hack/ci/install-linux-deps.sh
- name: release-plz
uses: MarcoIeni/release-plz-action@e28810957ef1fedfa89b5e9692e750ce45f62a67 # v0.5.65
uses: MarcoIeni/release-plz-action@476794ede164c5137bfc3a1dc6ed3675275690f9 # v0.5.99
env:
GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}"
CARGO_REGISTRY_TOKEN: "${{ secrets.KRATA_RELEASE_CARGO_TOKEN }}"

2
.gitignore vendored
View File

@@ -1 +1,3 @@
/target
/.idea
/.vscode

View File

@@ -1,4 +1,5 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
@@ -6,197 +7,36 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.0.20](https://github.com/edera-dev/krata/compare/v0.0.19...v0.0.20) - 2024-08-27
## [0.0.24](https://github.com/edera-dev/krata/compare/v0.0.23...v0.0.24) - 2024-12-14
### Added
- *(krata)* implement network reservation list ([#366](https://github.com/edera-dev/krata/pull/366))
- *(zone-exec)* implement terminal resize support ([#363](https://github.com/edera-dev/krata/pull/363))
### Other
- update Cargo.toml dependencies
## [0.0.19](https://github.com/edera-dev/krata/compare/v0.0.18...v0.0.19) - 2024-08-25
### Added
- *(config)* write default config to config.toml on startup ([#356](https://github.com/edera-dev/krata/pull/356))
- *(ctl)* add --format option to host status and improve cpu topology format ([#355](https://github.com/edera-dev/krata/pull/355))
- *(xen)* update xenclient and xenplatform to the latest structure (#433)
- *(xencall)* improve asynchronous support (#430)
- *(evtchn)* harden evtchn handling and improve api (#431)
- *(xenstore)* multi-watch and maybe-commit support (#429)
### Fixed
- *(zone-exec)* ensure that the underlying process is killed when rpc is closed ([#361](https://github.com/edera-dev/krata/pull/361))
- *(rpc)* rename HostStatus to GetHostStatus ([#360](https://github.com/edera-dev/krata/pull/360))
- *(console)* don't replay history when attaching to the console ([#358](https://github.com/edera-dev/krata/pull/358))
- *(zone-exec)* catch panic errors and show all errors immediately ([#359](https://github.com/edera-dev/krata/pull/359))
- *(xenclient)* examples should use supported platform
- *(xenclient)* boot example should use unsupported platform on aarch64
- *(xenplatform)* e820 sanitize should now produce valid mappings
- *(xenplatform)* use cfg attributes for returning supported platforms
### Other
- *(control)* split out all of the rpc calls into their own files ([#357](https://github.com/edera-dev/krata/pull/357))
## [0.0.18](https://github.com/edera-dev/krata/compare/v0.0.17...v0.0.18) - 2024-08-22
### Added
- *(zone)* kernel command line control on launch ([#351](https://github.com/edera-dev/krata/pull/351))
- *(xen-preflight)* test for hypervisor presence explicitly and error if missing ([#347](https://github.com/edera-dev/krata/pull/347))
### Fixed
- *(network)* allocate host ip from allocation pool ([#353](https://github.com/edera-dev/krata/pull/353))
- *(daemon)* turn off trace logging ([#352](https://github.com/edera-dev/krata/pull/352))
### Other
- Add support for reading hypervisor console ([#344](https://github.com/edera-dev/krata/pull/344))
- *(ctl)* move logic for branching ctl run steps into ControlCommands ([#342](https://github.com/edera-dev/krata/pull/342))
- *(deps)* upgrade dependencies and clean code (#432)
- update Cargo.toml dependencies
## [0.0.17](https://github.com/edera-dev/krata/compare/v0.0.16...v0.0.17) - 2024-08-15
### Added
- *(krata)* first pass on cpu hotplug support ([#340](https://github.com/edera-dev/krata/pull/340))
- *(exec)* implement tty support (fixes [#335](https://github.com/edera-dev/krata/pull/335)) ([#336](https://github.com/edera-dev/krata/pull/336))
- *(krata)* dynamic resource allocation (closes [#298](https://github.com/edera-dev/krata/pull/298)) ([#333](https://github.com/edera-dev/krata/pull/333))
## [0.0.23](https://github.com/edera-dev/krata/compare/v0.0.22...v0.0.23) - 2024-09-17
### Other
- update Cargo.toml dependencies
## [0.0.16](https://github.com/edera-dev/krata/compare/v0.0.15...v0.0.16) - 2024-08-14
### Added
- *(krata)* prepare for workload rework ([#276](https://github.com/edera-dev/krata/pull/276))
### Fixed
- *(idm)* reimplement packet processing algorithm ([#330](https://github.com/edera-dev/krata/pull/330))
- *(power-trap-eacces)* gracefully handle hypercall errors in power management ([#325](https://github.com/edera-dev/krata/pull/325))
## [0.0.22](https://github.com/edera-dev/krata/compare/v0.0.21...v0.0.22) - 2024-09-16
### Other
- *(o11y)* add more debug logs to daemon & runtime ([#318](https://github.com/edera-dev/krata/pull/318))
## [0.0.15](https://github.com/edera-dev/krata/compare/v0.0.14...v0.0.15) - 2024-08-06
### Fixed
- *(zone)* waitpid should be limited when no child processes exist (fixes [#304](https://github.com/edera-dev/krata/pull/304)) ([#305](https://github.com/edera-dev/krata/pull/305))
## [0.0.14](https://github.com/edera-dev/krata/compare/v0.0.13...v0.0.14) - 2024-08-06
### Added
- *(oci)* use local index as resolution cache when appropriate, fixes [#289](https://github.com/edera-dev/krata/pull/289) ([#294](https://github.com/edera-dev/krata/pull/294))
### Fixed
- *(idm)* process all idm messages in the same frame and use childwait exit notification for exec (fixes [#290](https://github.com/edera-dev/krata/pull/290)) ([#302](https://github.com/edera-dev/krata/pull/302))
### Other
- init: mount /proc with hidepid=1 ([#277](https://github.com/edera-dev/krata/pull/277))
- update Cargo.toml dependencies
## [0.0.13](https://github.com/edera-dev/krata/compare/v0.0.12...v0.0.13) - 2024-07-19
### Added
- *(kratactl)* rework cli to use subcommands ([#268](https://github.com/edera-dev/krata/pull/268))
- *(krata)* rename guest to zone ([#266](https://github.com/edera-dev/krata/pull/266))
### Other
- *(deps)* upgrade dependencies, fix hyper io traits issue ([#252](https://github.com/edera-dev/krata/pull/252))
- update Cargo.lock dependencies
- update Cargo.toml dependencies
## [0.0.12](https://github.com/edera-dev/krata/compare/v0.0.11...v0.0.12) - 2024-07-12
### Added
- *(oci)* add configuration value for oci seed file ([#220](https://github.com/edera-dev/krata/pull/220))
- *(power-management-defaults)* set an initial power management policy ([#219](https://github.com/edera-dev/krata/pull/219))
### Fixed
- *(daemon)* decrease rate of runtime reconcile ([#224](https://github.com/edera-dev/krata/pull/224))
- *(power)* ensure that xeon cpus with cpu gaps are not detected as p/e compatible ([#218](https://github.com/edera-dev/krata/pull/218))
- *(runtime)* use iommu only if devices are needed ([#243](https://github.com/edera-dev/krata/pull/243))
### Other
- Power management core functionality ([#217](https://github.com/edera-dev/krata/pull/217))
- *(powermgmt)* disable for now as a hackfix ([#242](https://github.com/edera-dev/krata/pull/242))
- Initial fluentd support ([#205](https://github.com/edera-dev/krata/pull/205))
- update Cargo.toml dependencies
- Use native loopdev implementation instead of loopdev-3 ([#209](https://github.com/edera-dev/krata/pull/209))
## [0.0.11](https://github.com/edera-dev/krata/compare/v0.0.10...v0.0.11) - 2024-06-23
### Added
- pci passthrough ([#114](https://github.com/edera-dev/krata/pull/114))
- *(runtime)* concurrent ip allocation ([#151](https://github.com/edera-dev/krata/pull/151))
- *(xen)* dynamic platform architecture ([#194](https://github.com/edera-dev/krata/pull/194))
### Fixed
- *(oci)* remove file size limit ([#142](https://github.com/edera-dev/krata/pull/142))
- *(oci)* use mirror.gcr.io as a mirror to docker hub ([#141](https://github.com/edera-dev/krata/pull/141))
### Other
- first pass of krata as an isolation engine
- *(xen)* split platform support into separate crate ([#195](https://github.com/edera-dev/krata/pull/195))
- *(xen)* move device creation into transaction interface ([#196](https://github.com/edera-dev/krata/pull/196))
## [0.0.10](https://github.com/edera-dev/krata/compare/v0.0.9...v0.0.10) - 2024-04-22
### Added
- implement guest exec ([#107](https://github.com/edera-dev/krata/pull/107))
- implement kernel / initrd oci image support ([#103](https://github.com/edera-dev/krata/pull/103))
- idm v2 ([#102](https://github.com/edera-dev/krata/pull/102))
- oci concurrency improvements ([#95](https://github.com/edera-dev/krata/pull/95))
- oci tar format, bit-perfect disk storage for config and manifest, concurrent image pulls ([#88](https://github.com/edera-dev/krata/pull/88))
### Fixed
- oci cache store should fallback to copy when rename won't work ([#96](https://github.com/edera-dev/krata/pull/96))
### Other
- update Cargo.lock dependencies
## [0.0.9](https://github.com/edera-dev/krata/compare/v0.0.8...v0.0.9) - 2024-04-15
### Added
- oci compliance work ([#85](https://github.com/edera-dev/krata/pull/85))
- oci packer can now use mksquashfs if available ([#70](https://github.com/edera-dev/krata/pull/70))
- basic kratactl top command ([#72](https://github.com/edera-dev/krata/pull/72))
- idm snooping ([#71](https://github.com/edera-dev/krata/pull/71))
- implement oci image progress ([#64](https://github.com/edera-dev/krata/pull/64))
- guest metrics support ([#46](https://github.com/edera-dev/krata/pull/46))
### Other
- init: default to xterm if TERM is not set ([#52](https://github.com/edera-dev/krata/pull/52))
- update Cargo.toml dependencies
## [0.0.8](https://github.com/edera-dev/krata/compare/v0.0.7...v0.0.8) - 2024-04-09
### Other
- update Cargo.lock dependencies
## [0.0.7](https://github.com/edera-dev/krata/compare/v0.0.6...v0.0.7) - 2024-04-09
### Other
- update Cargo.toml dependencies
- update Cargo.lock dependencies
## [0.0.6](https://github.com/edera-dev/krata/compare/v0.0.5...v0.0.6) - 2024-04-09
### Fixed
- increase channel acquisition timeout to support lower performance hosts ([#36](https://github.com/edera-dev/krata/pull/36))
### Other
- update Cargo.toml dependencies
- update Cargo.lock dependencies
## [0.0.5](https://github.com/edera-dev/krata/compare/v0.0.4...v0.0.5) - 2024-04-09
### Added
- *(ctl)* add help and about to commands and arguments ([#25](https://github.com/edera-dev/krata/pull/25))
### Other
- update Cargo.toml dependencies
- update Cargo.lock dependencies
## [0.0.4](https://github.com/edera-dev/krata/releases/tag/v${version}) - 2024-04-03
### Other
- implement automatic releases
- reimplement console to utilize channels, and provide logs support
- set hostname from launch config
- implement event stream retries
- work on parallel reconciliation
- implement parallel guest reconciliation
- log when a guest start failure occurs
- remove device restriction
- setup loopback interface
- place running tasks in cgroup
- preparations for xen control-plane

View File

@@ -30,5 +30,4 @@ While it's totally fine to simply bring it up on our Discord, we encourage openi
[Code of Conduct]: ./CODE_OF_CONDUCT.md
[Security Policy]: ./SECURITY.md
[Development Guide]: ./DEV.md
[good-first-issues]: https://github.com/edera-dev/krata/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22

3164
Cargo.lock generated

File diff suppressed because it is too large.

View File

@@ -1,13 +1,5 @@
[workspace]
members = [
"crates/build",
"crates/krata",
"crates/oci",
"crates/zone",
"crates/runtime",
"crates/daemon",
"crates/network",
"crates/ctl",
"crates/xen/xencall",
"crates/xen/xenclient",
"crates/xen/xenevtchn",
@ -18,117 +10,34 @@ members = [
resolver = "2"
[workspace.package]
version = "0.0.20"
version = "0.0.24"
homepage = "https://krata.dev"
license = "Apache-2.0"
license = "GPL-2.0-or-later"
repository = "https://github.com/edera-dev/krata"
[workspace.dependencies]
anyhow = "1.0"
arrayvec = "0.7.6"
async-compression = "0.4.12"
async-stream = "0.3.5"
async-trait = "0.1.81"
backhand = "0.18.0"
base64 = "0.22.1"
async-trait = "0.1.85"
bit-vec = "0.8.0"
byteorder = "1"
bytes = "1.7.1"
c2rust-bitfields = "0.18.0"
cgroups-rs = "0.3.4"
circular-buffer = "0.1.7"
comfy-table = "7.1.1"
crossterm = "0.28.1"
ctrlc = "3.4.5"
elf = "0.7.4"
env_logger = "0.11.5"
etherparse = "0.15.0"
fancy-duration = "0.9.2"
env_logger = "0.11.6"
flate2 = "1.0"
futures = "0.3.30"
hyper = "1.4.1"
hyper-util = "0.1.7"
human_bytes = "0.4"
indexmap = "2.4.0"
indicatif = "0.17.8"
ipnetwork = "0.20.0"
indexmap = "2.6.0"
libc = "0.2"
log = "0.4.22"
loopdev-3 = "0.5.1"
krata-advmac = "1.1.0"
krata-tokio-tar = "0.4.0"
memchr = "2"
nix = "0.29.0"
oci-spec = "0.6.8"
once_cell = "1.19.0"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
pin-project-lite = "0.2.14"
platform-info = "2.0.3"
prost = "0.13.1"
prost-build = "0.13.1"
prost-reflect-build = "0.14.0"
prost-types = "0.13.1"
pty-process = "0.4.0"
rand = "0.8.5"
ratatui = "0.28.1"
redb = "2.1.2"
regex = "1.10.6"
rtnetlink = "0.14.1"
scopeguard = "1.2.0"
serde_json = "1.0.127"
serde_yaml = "0.9"
sha256 = "1.5.0"
signal-hook = "0.3.17"
regex = "1.11.1"
slice-copy = "0.3.0"
smoltcp = "0.11.0"
sysinfo = "0.31.3"
termtree = "0.5.1"
thiserror = "1.0"
tokio-tun = "0.11.5"
tokio-util = "0.7.11"
toml = "0.8.19"
tonic-build = "0.12.2"
tower = "0.5.0"
udp-stream = "0.0.12"
url = "2.5.2"
walkdir = "2"
thiserror = "2.0.9"
xz2 = "0.1"
[workspace.dependencies.clap]
version = "4.5.16"
features = ["derive"]
[workspace.dependencies.prost-reflect]
version = "0.14.0"
features = ["derive"]
[workspace.dependencies.reqwest]
version = "0.12.7"
default-features = false
features = ["rustls-tls"]
[workspace.dependencies.serde]
version = "1.0.209"
features = ["derive"]
[workspace.dependencies.sys-mount]
version = "3.0.0"
default-features = false
[workspace.dependencies.tokio]
version = "1.39.3"
version = "1.41.1"
features = ["full"]
[workspace.dependencies.tokio-stream]
version = "0.1"
features = ["io-util", "net"]
[workspace.dependencies.tonic]
version = "0.12.2"
features = ["tls"]
[workspace.dependencies.uuid]
version = "1.10.0"
version = "1.11.0"
features = ["v4"]
[profile.release]

View File

@@ -1,34 +0,0 @@
[target.aarch64-unknown-linux-gnu]
pre-build = [
"apt-get update && apt-get --assume-yes install protobuf-compiler"
]
[target.aarch64-unknown-linux-musl]
pre-build = [
"apt-get update && apt-get --assume-yes install protobuf-compiler"
]
[target.x86_64-unknown-linux-gnu]
pre-build = [
"apt-get update && apt-get --assume-yes install protobuf-compiler"
]
[target.x86_64-unknown-linux-musl]
pre-build = [
"apt-get update && apt-get --assume-yes install protobuf-compiler"
]
[target.riscv64gc-unknown-linux-gnu]
pre-build = [
"apt-get update && apt-get --assume-yes install protobuf-compiler"
]
[target.x86_64-unknown-freebsd]
pre-build = [
"apt-get update && apt-get --assume-yes install protobuf-compiler"
]
[target.x86_64-unknown-netbsd]
pre-build = [
"apt-get update && apt-get --assume-yes install protobuf-compiler"
]

98
DEV.md
View File

@@ -1,98 +0,0 @@
# Development Guide
## Structure
krata is composed of four major executables:
| Executable | Runs On | User Interaction | Dev Runner | Code Path |
|------------|---------|------------------|--------------------------|----------------|
| kratad | host | backend daemon | ./hack/debug/kratad.sh | crates/daemon |
| kratanet | host | backend daemon | ./hack/debug/kratanet.sh | crates/network |
| kratactl | host | CLI tool | ./hack/debug/kratactl.sh | crates/ctl |
| kratazone | zone | none, zone init | N/A | crates/zone |
You will find the code for each executable in the bin/ and src/ directories inside
its corresponding code path from the table above.
## Environment
| Component | Specification | Notes |
|--------------|---------------|----------------------------------------------------------------------------------|
| Architecture | x86_64 | aarch64 support is still in development |
| Memory | At least 6GB | dom0 will need to be configured with lower memory limit to give krata zones room |
| Xen | 4.17+ | |
| Debian | stable / sid | Debian is recommended due to the ease of Xen setup |
| rustup | any | Install Rustup from https://rustup.rs |
## Setup Guide
1. Install the specified Debian version on an x86_64 host _capable_ of KVM (NOTE: KVM is not used; Xen is a type-1 hypervisor).
2. Install required packages:
```sh
$ apt install git xen-system-amd64 build-essential musl-tools \
protobuf-compiler libprotobuf-dev squashfs-tools erofs-utils
```
3. Install [rustup](https://rustup.rs) for managing a Rust environment.
Make sure to install the targets that you need for krata:
```sh
$ rustup target add x86_64-unknown-linux-gnu
$ rustup target add x86_64-unknown-linux-musl
```
4. Configure `/etc/default/grub.d/xen.cfg` to give krata zones some room:
```sh
# Configure dom0_mem to be 4GB, but leave the rest of the RAM for krata zones.
GRUB_CMDLINE_XEN_DEFAULT="dom0_mem=4G,max:4G"
```
After changing the grub config, update grub: `update-grub`
Then reboot to boot the system as a Xen dom0.
You can validate that Xen is set up by running `dmesg | grep "Hypervisor detected"` and ensuring it returns a line like `Hypervisor detected: Xen PV`; if that line is missing, the host is not running under Xen.
5. Clone the krata source code:
```sh
$ git clone https://github.com/edera-dev/krata.git krata
$ cd krata
```
6. Fetch the zone kernel image:
```sh
$ ./hack/kernel/fetch.sh -u
```
7. Copy the zone kernel artifacts into `/var/lib/krata/zone` so they are automatically detected by kratad:
```sh
$ mkdir -p /var/lib/krata/zone
$ cp target/kernel/kernel-x86_64 /var/lib/krata/zone/kernel
$ cp target/kernel/addons-x86_64.squashfs /var/lib/krata/zone/addons.squashfs
```
8. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
9. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
10. Run `kratactl` to launch a zone:
```sh
$ ./hack/debug/kratactl.sh zone launch --attach alpine:latest
```
To detach from the zone console, use `Ctrl + ]` on your keyboard.
To list the running zones, run:
```sh
$ ./hack/debug/kratactl.sh zone list
```
To destroy a running zone, copy its UUID from either the launch command or the zone list and run:
```sh
$ ./hack/debug/kratactl.sh zone destroy ZONE_UUID
```

FAQ.md

@ -1,11 +1,5 @@
# Frequently Asked Questions
## How does krata currently work?
The krata isolation engine makes it possible to launch OCI containers on a Xen hypervisor without utilizing the Xen userspace tooling. krata contains just enough of the Xen userspace (reimplemented in Rust) to start an x86_64 Xen Linux PV guest, and implements a Linux init process that can boot an OCI container. It does so by converting an OCI image into a squashfs/erofs file and packaging basic startup data in a bundle that the init process can read.
In addition, due to the desire to reduce dependence on the dom0 network, krata contains a networking daemon called kratanet. kratanet listens for krata guests to start up and launches a userspace networking environment. krata guests can access the dom0 networking stack via the proxynat, which makes it possible to communicate over UDP, TCP, and ICMP (echo only) with the outside world. Each krata guest is also provided a "gateway" IP (in both IPv4 and IPv6) which utilizes smoltcp to provide a virtual host. In the future, that virtual host could dial connections into the container to access container networking resources.
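For example, the image conversion described above can be exercised ahead of time through the CLI. This is only a sketch: the `image pull` subcommand and `--image-format` flag come from the kratactl sources, and the value names assume clap's default kebab-case rendering.
```sh
# Pre-pack an OCI image into the erofs format read by the zone init
$ kratactl image pull alpine:latest --image-format erofs
```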
## Why utilize Xen instead of KVM?
Xen is a very interesting technology, and Edera believes that type-1 hypervisors are ideal for security. Most OCI isolation techniques use KVM, which is not a type-1 hypervisor and is therefore subject to the security limitations of the host OS kernel. A type-1 hypervisor, on the other hand, provides a minimal attack surface on which less-trusted guests can be launched.

LICENSE

@ -1,201 +1,339 @@
[LICENSE: the Apache License, Version 2.0 text (January 2004, http://www.apache.org/licenses/, ending with the notice "Copyright 2024 Edera Inc." under http://www.apache.org/licenses/LICENSE-2.0) is replaced in this hunk by the full text of the GNU General Public License, Version 2, June 1991 (Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA), including its "How to Apply These Terms to Your New Programs" appendix.]


@ -1,28 +1,22 @@
# krata
An isolation engine for securing compute workloads.
```bash
$ kratactl zone launch -a alpine:latest
```
krata is an implementation of a Xen control-plane in Rust.
![license](https://img.shields.io/github/license/edera-dev/krata)
![discord](https://img.shields.io/discord/1207447453083766814?label=discord)
[![check](https://github.com/edera-dev/krata/actions/workflows/check.yml/badge.svg)](https://github.com/edera-dev/krata/actions/workflows/check.yml)
[![nightly](https://github.com/edera-dev/krata/actions/workflows/nightly.yml/badge.svg)](https://github.com/edera-dev/krata/actions/workflows/nightly.yml)
---
- [Frequently Asked Questions](FAQ.md)
- [Development Guide](DEV.md)
- [Code of Conduct](CODE_OF_CONDUCT.md)
- [Security Policy](SECURITY.md)
- [Edera Technical Overview](technical-overview.md)
## Introduction
krata is a single-host workload isolation service. It isolates workloads using a type-1 hypervisor, providing a tight security boundary while preserving performance.
krata utilizes the core of the Xen hypervisor with a fully memory-safe Rust control plane.
krata is a component of [Edera Protect](https://edera.dev/protect-kubernetes), for secure-by-design infrastructure.
It provides the base layer on which Edera Protect zones are built: a securely booted virtualization guest on the Xen hypervisor.
## Hardware Support
@ -30,3 +24,4 @@ krata utilizes the core of the Xen hypervisor with a fully memory-safe Rust cont
|--------------|------------------|-------------------------|
| x86_64 | 100% Completed | None, Intel VT-x, AMD-V |
| aarch64 | 10% Completed | AArch64 virtualization |


@ -1,25 +0,0 @@
[package]
name = "krata-buildtools"
description = "Build tools for krata."
license.workspace = true
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
resolver = "2"
publish = false
[dependencies]
anyhow = { workspace = true }
env_logger = { workspace = true }
oci-spec = { workspace = true }
scopeguard = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
krata-oci = { path = "../oci", version = "^0.0.20" }
krata-tokio-tar = { workspace = true }
uuid = { workspace = true }
[[bin]]
name = "build-fetch-kernel"
path = "bin/fetch_kernel.rs"


@ -1,121 +0,0 @@
use std::{
env::{self, args},
path::PathBuf,
};
use anyhow::{anyhow, Result};
use env_logger::Env;
use krataoci::{
name::ImageName,
packer::{service::OciPackerService, OciPackedFormat},
progress::OciProgressContext,
registry::OciPlatform,
};
use oci_spec::image::{Arch, Os};
use tokio::{
fs::{self, File},
io::BufReader,
};
use tokio_stream::StreamExt;
use tokio_tar::Archive;
use uuid::Uuid;
#[tokio::main]
async fn main() -> Result<()> {
env_logger::Builder::from_env(Env::default().default_filter_or("warn")).init();
fs::create_dir_all("target/kernel").await?;
let arch = env::var("TARGET_ARCH").map_err(|_| anyhow!("missing TARGET_ARCH env var"))?;
println!("kernel architecture: {}", arch);
let platform = OciPlatform::new(
Os::Linux,
match arch.as_str() {
"x86_64" => Arch::Amd64,
"aarch64" => Arch::ARM64,
_ => {
return Err(anyhow!("unknown architecture '{}'", arch));
}
},
);
let image = ImageName::parse(&args().nth(1).unwrap())?;
let mut cache_dir = env::temp_dir().clone();
cache_dir.push(format!("krata-cache-{}", Uuid::new_v4()));
fs::create_dir_all(&cache_dir).await?;
let _delete_cache_dir = scopeguard::guard(cache_dir.clone(), |dir| {
let _ = std::fs::remove_dir_all(dir);
});
let (context, _) = OciProgressContext::create();
let service = OciPackerService::new(None, &cache_dir, platform).await?;
let packed = service
.request(image.clone(), OciPackedFormat::Tar, false, true, context)
.await?;
let annotations = packed
.manifest
.item()
.annotations()
.clone()
.unwrap_or_default();
let Some(format) = annotations.get("dev.edera.kernel.format") else {
return Err(anyhow!(
"image manifest missing 'dev.edera.kernel.format' annotation"
));
};
let Some(version) = annotations.get("dev.edera.kernel.version") else {
return Err(anyhow!(
"image manifest missing 'dev.edera.kernel.version' annotation"
));
};
let Some(flavor) = annotations.get("dev.edera.kernel.flavor") else {
return Err(anyhow!(
"image manifest missing 'dev.edera.kernel.flavor' annotation"
));
};
if format != "1" {
return Err(anyhow!("kernel format version '{}' is unknown", format));
}
let file = BufReader::new(File::open(packed.path).await?);
let mut archive = Archive::new(file);
let mut entries = archive.entries()?;
let kernel_image_tar_path = PathBuf::from("kernel/image");
let kernel_addons_tar_path = PathBuf::from("kernel/addons.squashfs");
let kernel_image_out_path = PathBuf::from(format!("target/kernel/kernel-{}", arch));
let kernel_addons_out_path = PathBuf::from(format!("target/kernel/addons-{}.squashfs", arch));
if kernel_image_out_path.exists() {
fs::remove_file(&kernel_image_out_path).await?;
}
if kernel_addons_out_path.exists() {
fs::remove_file(&kernel_addons_out_path).await?;
}
while let Some(entry) = entries.next().await {
let mut entry = entry?;
let path = entry.path()?.to_path_buf();
if !entry.header().entry_type().is_file() {
continue;
}
if path == kernel_image_tar_path {
entry.unpack(&kernel_image_out_path).await?;
} else if path == kernel_addons_tar_path {
entry.unpack(&kernel_addons_out_path).await?;
}
}
if !kernel_image_out_path.exists() {
return Err(anyhow!("image did not contain a file named /kernel/image"));
}
println!("kernel version: v{}", version);
println!("kernel flavor: {}", flavor);
Ok(())
}
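A hypothetical invocation of this tool, assuming it is run from the workspace root; `KERNEL_IMAGE` is a placeholder for the kernel OCI image reference, which in practice is supplied by `./hack/kernel/fetch.sh`:
```sh
# KERNEL_IMAGE is a placeholder; TARGET_ARCH selects the kernel architecture
$ TARGET_ARCH=x86_64 cargo run --bin build-fetch-kernel -- "$KERNEL_IMAGE"
# On success the artifacts land in target/kernel/kernel-x86_64 and target/kernel/addons-x86_64.squashfs
```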


@ -1,42 +0,0 @@
[package]
name = "krata-ctl"
description = "Command-line tool to control the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
resolver = "2"
[dependencies]
anyhow = { workspace = true }
async-stream = { workspace = true }
base64 = { workspace = true }
clap = { workspace = true }
comfy-table = { workspace = true }
crossterm = { workspace = true, features = ["event-stream"] }
ctrlc = { workspace = true, features = ["termination"] }
env_logger = { workspace = true }
fancy-duration = { workspace = true }
human_bytes = { workspace = true }
indicatif = { workspace = true }
krata = { path = "../krata", version = "^0.0.20" }
log = { workspace = true }
prost-reflect = { workspace = true, features = ["serde"] }
prost-types = { workspace = true }
ratatui = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
termtree = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
tonic = { workspace = true }
tower = { workspace = true }
[lib]
name = "kratactl"
[[bin]]
name = "kratactl"
path = "bin/control.rs"


@ -1,11 +0,0 @@
use anyhow::Result;
use clap::Parser;
use env_logger::Env;
use kratactl::cli::ControlCommand;
#[tokio::main]
async fn main() -> Result<()> {
env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();
ControlCommand::parse().run().await
}
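Because the CLI initializes env_logger with a default filter of `info`, verbosity can presumably be raised via env_logger's standard `RUST_LOG` variable, for example:
```sh
# Run the CLI with debug-level logging
$ RUST_LOG=debug kratactl zone list
```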


@ -1,128 +0,0 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
use krata::{
events::EventStream,
v1::control::{control_service_client::ControlServiceClient, DeviceInfo, ListDevicesRequest},
};
use serde_json::Value;
use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, proto2kv};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum DeviceListFormat {
Table,
Json,
JsonPretty,
Jsonl,
Yaml,
KeyValue,
Simple,
}
#[derive(Parser)]
#[command(about = "List device information")]
pub struct DeviceListCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: DeviceListFormat,
}
impl DeviceListCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
_events: EventStream,
) -> Result<()> {
let reply = client
.list_devices(ListDevicesRequest {})
.await?
.into_inner();
let mut devices = reply.devices;
devices.sort_by(|a, b| a.name.cmp(&b.name));
match self.format {
DeviceListFormat::Table => {
self.print_devices_table(devices)?;
}
DeviceListFormat::Simple => {
for device in devices {
println!("{}\t{}\t{}", device.name, device.claimed, device.owner);
}
}
DeviceListFormat::Json | DeviceListFormat::JsonPretty | DeviceListFormat::Yaml => {
let mut values = Vec::new();
for device in devices {
let message = proto2dynamic(device)?;
values.push(serde_json::to_value(message)?);
}
let value = Value::Array(values);
let encoded = if self.format == DeviceListFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == DeviceListFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
DeviceListFormat::Jsonl => {
for device in devices {
let message = proto2dynamic(device)?;
println!("{}", serde_json::to_string(&message)?);
}
}
DeviceListFormat::KeyValue => {
self.print_key_value(devices)?;
}
}
Ok(())
}
fn print_devices_table(&self, devices: Vec<DeviceInfo>) -> Result<()> {
let mut table = Table::new();
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
table.set_header(vec!["name", "status", "owner"]);
for device in devices {
let status_text = if device.claimed {
"claimed"
} else {
"available"
};
let status_color = if device.claimed {
Color::Blue
} else {
Color::Green
};
table.add_row(vec![
Cell::new(device.name),
Cell::new(status_text).fg(status_color),
Cell::new(device.owner),
]);
}
if table.is_empty() {
println!("no devices configured");
} else {
println!("{}", table);
}
Ok(())
}
fn print_key_value(&self, devices: Vec<DeviceInfo>) -> Result<()> {
for device in devices {
let kvs = proto2kv(device)?;
println!("{}", kv2line(kvs));
}
Ok(())
}
}
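A sketch of how this command is likely invoked, assuming clap's default kebab-case naming for the subcommand and the `DeviceListFormat` values above:
```sh
# Table output is the default
$ kratactl device list
$ kratactl device list --format json-pretty
```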


@ -1,44 +0,0 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
use crate::cli::device::list::DeviceListCommand;
pub mod list;
#[derive(Parser)]
#[command(about = "Manage the devices on the isolation engine")]
pub struct DeviceCommand {
#[command(subcommand)]
subcommand: DeviceCommands,
}
impl DeviceCommand {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
self.subcommand.run(client, events).await
}
}
#[derive(Subcommand)]
pub enum DeviceCommands {
List(DeviceListCommand),
}
impl DeviceCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
match self {
DeviceCommands::List(list) => list.run(client, events).await,
}
}
}


@ -1,104 +0,0 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use comfy_table::presets::UTF8_FULL_CONDENSED;
use comfy_table::{Cell, Table};
use krata::v1::control::{
control_service_client::ControlServiceClient, GetHostCpuTopologyRequest, HostCpuTopologyClass,
};
use serde_json::Value;
use crate::format::{kv2line, proto2dynamic, proto2kv};
use tonic::{transport::Channel, Request};
fn class_to_str(input: HostCpuTopologyClass) -> String {
match input {
HostCpuTopologyClass::Standard => "Standard".to_string(),
HostCpuTopologyClass::Performance => "Performance".to_string(),
HostCpuTopologyClass::Efficiency => "Efficiency".to_string(),
}
}
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum HostCpuTopologyFormat {
Table,
Json,
JsonPretty,
Jsonl,
Yaml,
KeyValue,
}
#[derive(Parser)]
#[command(about = "Display information about the host CPU topology")]
pub struct HostCpuTopologyCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: HostCpuTopologyFormat,
}
impl HostCpuTopologyCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.get_host_cpu_topology(Request::new(GetHostCpuTopologyRequest {}))
.await?
.into_inner();
match self.format {
HostCpuTopologyFormat::Table => {
let mut table = Table::new();
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
table.set_header(vec!["id", "node", "socket", "core", "thread", "class"]);
for (i, cpu) in response.cpus.iter().enumerate() {
table.add_row(vec![
Cell::new(i),
Cell::new(cpu.node),
Cell::new(cpu.socket),
Cell::new(cpu.core),
Cell::new(cpu.thread),
Cell::new(class_to_str(cpu.class())),
]);
}
if !table.is_empty() {
println!("{}", table);
}
}
HostCpuTopologyFormat::Json
| HostCpuTopologyFormat::JsonPretty
| HostCpuTopologyFormat::Yaml => {
let mut values = Vec::new();
for cpu in response.cpus {
let message = proto2dynamic(cpu)?;
values.push(serde_json::to_value(message)?);
}
let value = Value::Array(values);
let encoded = if self.format == HostCpuTopologyFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == HostCpuTopologyFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
HostCpuTopologyFormat::Jsonl => {
for cpu in response.cpus {
let message = proto2dynamic(cpu)?;
println!("{}", serde_json::to_string(&message)?);
}
}
HostCpuTopologyFormat::KeyValue => {
for cpu in response.cpus {
let kvs = proto2kv(cpu)?;
println!("{}", kv2line(kvs),);
}
}
}
Ok(())
}
}
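Likely usage, under the same kebab-case naming assumption (`CpuTopology` becomes `cpu-topology`):
```sh
# Columns follow the table header above: id, node, socket, core, thread, class
$ kratactl host cpu-topology
```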


@ -1,23 +0,0 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::control::{
control_service_client::ControlServiceClient, ReadHypervisorConsoleRequest,
};
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(about = "Display hypervisor console output")]
pub struct HostHvConsoleCommand {}
impl HostHvConsoleCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.read_hypervisor_console(Request::new(ReadHypervisorConsoleRequest {}))
.await?
.into_inner();
print!("{}", response.data);
Ok(())
}
}
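Under the same assumption, dumping the hypervisor console would look something like:
```sh
$ kratactl host hv-console
```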


@ -1,157 +0,0 @@
use anyhow::Result;
use base64::Engine;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
idm::{internal, serialize::IdmSerializable, transport::IdmTransportPacketForm},
v1::control::{control_service_client::ControlServiceClient, SnoopIdmReply, SnoopIdmRequest},
};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio_stream::StreamExt;
use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, value2kv};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum HostIdmSnoopFormat {
Simple,
Jsonl,
KeyValue,
}
#[derive(Parser)]
#[command(about = "Snoop on the IDM bus")]
pub struct HostIdmSnoopCommand {
#[arg(short, long, default_value = "simple", help = "Output format")]
format: HostIdmSnoopFormat,
}
impl HostIdmSnoopCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
_events: EventStream,
) -> Result<()> {
let mut stream = client.snoop_idm(SnoopIdmRequest {}).await?.into_inner();
while let Some(reply) = stream.next().await {
let reply = reply?;
let Some(line) = convert_idm_snoop(reply) else {
continue;
};
match self.format {
HostIdmSnoopFormat::Simple => {
self.print_simple(line)?;
}
HostIdmSnoopFormat::Jsonl => {
let encoded = serde_json::to_string(&line)?;
println!("{}", encoded.trim());
}
HostIdmSnoopFormat::KeyValue => {
self.print_key_value(line)?;
}
}
}
Ok(())
}
fn print_simple(&self, line: IdmSnoopLine) -> Result<()> {
let encoded = if !line.packet.decoded.is_null() {
serde_json::to_string(&line.packet.decoded)?
} else {
base64::prelude::BASE64_STANDARD.encode(&line.packet.data)
};
println!(
"({} -> {}) {} {} {}",
line.from, line.to, line.packet.id, line.packet.form, encoded
);
Ok(())
}
fn print_key_value(&self, line: IdmSnoopLine) -> Result<()> {
let kvs = value2kv(serde_json::to_value(line)?)?;
println!("{}", kv2line(kvs));
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct IdmSnoopLine {
pub from: String,
pub to: String,
pub packet: IdmSnoopData,
}
#[derive(Serialize, Deserialize)]
pub struct IdmSnoopData {
pub id: u64,
pub channel: u64,
pub form: String,
pub data: String,
pub decoded: Value,
}
pub fn convert_idm_snoop(reply: SnoopIdmReply) -> Option<IdmSnoopLine> {
let packet = &(reply.packet?);
let decoded = if packet.channel == 0 {
match packet.form() {
IdmTransportPacketForm::Event => internal::Event::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok()),
IdmTransportPacketForm::Request
| IdmTransportPacketForm::StreamRequest
| IdmTransportPacketForm::StreamRequestUpdate => {
internal::Request::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok())
}
IdmTransportPacketForm::Response | IdmTransportPacketForm::StreamResponseUpdate => {
internal::Response::decode(&packet.data)
.ok()
.and_then(|event| proto2dynamic(event).ok())
}
_ => None,
}
} else {
None
};
let decoded = decoded
.and_then(|message| serde_json::to_value(message).ok())
.unwrap_or(Value::Null);
let data = IdmSnoopData {
id: packet.id,
channel: packet.channel,
form: match packet.form() {
IdmTransportPacketForm::Raw => "raw".to_string(),
IdmTransportPacketForm::Event => "event".to_string(),
IdmTransportPacketForm::Request => "request".to_string(),
IdmTransportPacketForm::Response => "response".to_string(),
IdmTransportPacketForm::StreamRequest => "stream-request".to_string(),
IdmTransportPacketForm::StreamRequestUpdate => "stream-request-update".to_string(),
IdmTransportPacketForm::StreamRequestClosed => "stream-request-closed".to_string(),
IdmTransportPacketForm::StreamResponseUpdate => "stream-response-update".to_string(),
IdmTransportPacketForm::StreamResponseClosed => "stream-response-closed".to_string(),
_ => format!("unknown-{}", packet.form),
},
data: base64::prelude::BASE64_STANDARD.encode(&packet.data),
decoded,
};
Some(IdmSnoopLine {
from: reply.from,
to: reply.to,
packet: data,
})
}
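A sketch of snooping the IDM bus with JSON-lines output, again assuming clap's kebab-case naming:
```sh
# Each line is one IdmSnoopLine record as JSON
$ kratactl host idm-snoop --format jsonl
```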


@ -1,59 +0,0 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
use crate::cli::host::cpu_topology::HostCpuTopologyCommand;
use crate::cli::host::hv_console::HostHvConsoleCommand;
use crate::cli::host::idm_snoop::HostIdmSnoopCommand;
use crate::cli::host::status::HostStatusCommand;
pub mod cpu_topology;
pub mod hv_console;
pub mod idm_snoop;
pub mod status;
#[derive(Parser)]
#[command(about = "Manage the host of the isolation engine")]
pub struct HostCommand {
#[command(subcommand)]
subcommand: HostCommands,
}
impl HostCommand {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
self.subcommand.run(client, events).await
}
}
#[derive(Subcommand)]
pub enum HostCommands {
CpuTopology(HostCpuTopologyCommand),
Status(HostStatusCommand),
IdmSnoop(HostIdmSnoopCommand),
HvConsole(HostHvConsoleCommand),
}
impl HostCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
match self {
HostCommands::CpuTopology(cpu_topology) => cpu_topology.run(client).await,
HostCommands::Status(status) => status.run(client).await,
HostCommands::IdmSnoop(snoop) => snoop.run(client, events).await,
HostCommands::HvConsole(hvconsole) => hvconsole.run(client).await,
}
}
}


@ -1,60 +0,0 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::v1::control::{control_service_client::ControlServiceClient, GetHostStatusRequest};
use crate::format::{kv2line, proto2dynamic, proto2kv};
use tonic::{transport::Channel, Request};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum HostStatusFormat {
Simple,
Json,
JsonPretty,
Yaml,
KeyValue,
}
#[derive(Parser)]
#[command(about = "Get information about the host")]
pub struct HostStatusCommand {
#[arg(short, long, default_value = "simple", help = "Output format")]
format: HostStatusFormat,
}
impl HostStatusCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.get_host_status(Request::new(GetHostStatusRequest {}))
.await?
.into_inner();
match self.format {
HostStatusFormat::Simple => {
println!("Host UUID: {}", response.host_uuid);
println!("Host Domain: {}", response.host_domid);
println!("Krata Version: {}", response.krata_version);
println!("Host IPv4: {}", response.host_ipv4);
println!("Host IPv6: {}", response.host_ipv6);
println!("Host Ethernet Address: {}", response.host_mac);
}
HostStatusFormat::Json | HostStatusFormat::JsonPretty | HostStatusFormat::Yaml => {
let message = proto2dynamic(response)?;
let value = serde_json::to_value(message)?;
let encoded = if self.format == HostStatusFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == HostStatusFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
HostStatusFormat::KeyValue => {
let kvs = proto2kv(response)?;
println!("{}", kv2line(kvs),);
}
}
Ok(())
}
}
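Likely usage, with the format values following the `HostStatusFormat` enum above:
```sh
$ kratactl host status
$ kratactl host status --format key-value
```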


@ -1,44 +0,0 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
use crate::cli::image::pull::ImagePullCommand;
pub mod pull;
#[derive(Parser)]
#[command(about = "Manage the images on the isolation engine")]
pub struct ImageCommand {
#[command(subcommand)]
subcommand: ImageCommands,
}
impl ImageCommand {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
self.subcommand.run(client, events).await
}
}
#[derive(Subcommand)]
pub enum ImageCommands {
Pull(ImagePullCommand),
}
impl ImageCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
_events: EventStream,
) -> Result<()> {
match self {
ImageCommands::Pull(pull) => pull.run(client).await,
}
}
}


@ -1,48 +0,0 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::v1::{
common::OciImageFormat,
control::{control_service_client::ControlServiceClient, PullImageRequest},
};
use tonic::transport::Channel;
use crate::pull::pull_interactive_progress;
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
pub enum ImagePullImageFormat {
Squashfs,
Erofs,
Tar,
}
#[derive(Parser)]
#[command(about = "Pull an image into the cache")]
pub struct ImagePullCommand {
#[arg(help = "Image name")]
image: String,
#[arg(short = 's', long, default_value = "squashfs", help = "Image format")]
image_format: ImagePullImageFormat,
#[arg(short = 'o', long, help = "Overwrite image cache")]
overwrite_cache: bool,
}
impl ImagePullCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let response = client
.pull_image(PullImageRequest {
image: self.image.clone(),
format: match self.image_format {
ImagePullImageFormat::Squashfs => OciImageFormat::Squashfs.into(),
ImagePullImageFormat::Erofs => OciImageFormat::Erofs.into(),
ImagePullImageFormat::Tar => OciImageFormat::Tar.into(),
},
overwrite_cache: self.overwrite_cache,
update: true,
})
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;
println!("{}", reply.digest);
Ok(())
}
}


@ -1,90 +0,0 @@
pub mod device;
pub mod host;
pub mod image;
pub mod network;
pub mod zone;
use crate::cli::device::DeviceCommand;
use crate::cli::host::HostCommand;
use crate::cli::image::ImageCommand;
use crate::cli::zone::ZoneCommand;
use anyhow::{anyhow, Result};
use clap::Parser;
use krata::{
client::ControlClientProvider,
events::EventStream,
v1::control::{control_service_client::ControlServiceClient, ResolveZoneIdRequest},
};
use network::NetworkCommand;
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(version, about = "Control the krata isolation engine")]
pub struct ControlCommand {
#[arg(
short,
long,
help = "The connection URL to the krata isolation engine",
default_value = "unix:///var/lib/krata/daemon.socket"
)]
connection: String,
#[command(subcommand)]
command: ControlCommands,
}
#[allow(clippy::large_enum_variant)]
#[derive(Parser)]
pub enum ControlCommands {
Zone(ZoneCommand),
Image(ImageCommand),
Network(NetworkCommand),
Device(DeviceCommand),
Host(HostCommand),
}
impl ControlCommand {
pub async fn run(self) -> Result<()> {
let client = ControlClientProvider::dial(self.connection.parse()?).await?;
let events = EventStream::open(client.clone()).await?;
self.command.run(client, events).await
}
}
impl ControlCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
match self {
ControlCommands::Zone(zone) => zone.run(client, events).await,
ControlCommands::Network(network) => network.run(client, events).await,
ControlCommands::Image(image) => image.run(client, events).await,
ControlCommands::Device(device) => device.run(client, events).await,
ControlCommands::Host(host) => host.run(client, events).await,
}
}
}
pub async fn resolve_zone(
client: &mut ControlServiceClient<Channel>,
name: &str,
) -> Result<String> {
let reply = client
.resolve_zone_id(Request::new(ResolveZoneIdRequest {
name: name.to_string(),
}))
.await?
.into_inner();
if !reply.zone_id.is_empty() {
Ok(reply.zone_id)
} else {
Err(anyhow!("unable to resolve zone '{}'", name))
}
}
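The global `--connection` flag defaults to the daemon socket shown above, so pointing the CLI at a socket explicitly might look like:
```sh
$ kratactl --connection unix:///var/lib/krata/daemon.socket zone list
```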


@ -1,43 +0,0 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use reservation::NetworkReservationCommand;
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
pub mod reservation;
#[derive(Parser)]
#[command(about = "Manage the network on the isolation engine")]
pub struct NetworkCommand {
#[command(subcommand)]
subcommand: NetworkCommands,
}
impl NetworkCommand {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
self.subcommand.run(client, events).await
}
}
#[derive(Subcommand)]
pub enum NetworkCommands {
Reservation(NetworkReservationCommand),
}
impl NetworkCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
match self {
NetworkCommands::Reservation(reservation) => reservation.run(client, events).await,
}
}
}


@ -1,125 +0,0 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Table};
use krata::{
events::EventStream,
v1::{
common::NetworkReservation,
control::{control_service_client::ControlServiceClient, ListNetworkReservationsRequest},
},
};
use serde_json::Value;
use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, proto2kv};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum NetworkReservationListFormat {
Table,
Json,
JsonPretty,
Jsonl,
Yaml,
KeyValue,
Simple,
}
#[derive(Parser)]
#[command(about = "List network reservation information")]
pub struct NetworkReservationListCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: NetworkReservationListFormat,
}
impl NetworkReservationListCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
_events: EventStream,
) -> Result<()> {
let reply = client
.list_network_reservations(ListNetworkReservationsRequest {})
.await?
.into_inner();
let mut reservations = reply.reservations;
reservations.sort_by(|a, b| a.uuid.cmp(&b.uuid));
match self.format {
NetworkReservationListFormat::Table => {
self.print_reservations_table(reservations)?;
}
NetworkReservationListFormat::Simple => {
for reservation in reservations {
println!(
"{}\t{}\t{}\t{}",
reservation.uuid, reservation.ipv4, reservation.ipv6, reservation.mac
);
}
}
NetworkReservationListFormat::Json
| NetworkReservationListFormat::JsonPretty
| NetworkReservationListFormat::Yaml => {
let mut values = Vec::new();
for device in reservations {
let message = proto2dynamic(device)?;
values.push(serde_json::to_value(message)?);
}
let value = Value::Array(values);
let encoded = if self.format == NetworkReservationListFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == NetworkReservationListFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
NetworkReservationListFormat::Jsonl => {
for device in reservations {
let message = proto2dynamic(device)?;
println!("{}", serde_json::to_string(&message)?);
}
}
NetworkReservationListFormat::KeyValue => {
self.print_key_value(reservations)?;
}
}
Ok(())
}
fn print_reservations_table(&self, reservations: Vec<NetworkReservation>) -> Result<()> {
let mut table = Table::new();
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
table.set_header(vec!["uuid", "ipv4", "ipv6", "mac"]);
for reservation in reservations {
table.add_row(vec![
Cell::new(reservation.uuid),
Cell::new(reservation.ipv4),
Cell::new(reservation.ipv6),
Cell::new(reservation.mac),
]);
}
if table.is_empty() {
println!("no network reservations found");
} else {
println!("{}", table);
}
Ok(())
}
fn print_key_value(&self, reservations: Vec<NetworkReservation>) -> Result<()> {
for reservation in reservations {
let kvs = proto2kv(reservation)?;
println!("{}", kv2line(kvs));
}
Ok(())
}
}

View File

@ -1,43 +0,0 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use list::NetworkReservationListCommand;
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
pub mod list;
#[derive(Parser)]
#[command(about = "Manage network reservations")]
pub struct NetworkReservationCommand {
#[command(subcommand)]
subcommand: NetworkReservationCommands,
}
impl NetworkReservationCommand {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
self.subcommand.run(client, events).await
}
}
#[derive(Subcommand)]
pub enum NetworkReservationCommands {
List(NetworkReservationListCommand),
}
impl NetworkReservationCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
match self {
NetworkReservationCommands::List(list) => list.run(client, events).await,
}
}
}

View File

@ -1,41 +0,0 @@
use anyhow::Result;
use clap::Parser;
use krata::{events::EventStream, v1::control::control_service_client::ControlServiceClient};
use tokio::select;
use tonic::transport::Channel;
use crate::console::StdioConsoleStream;
use crate::cli::resolve_zone;
#[derive(Parser)]
#[command(about = "Attach to the zone console")]
pub struct ZoneAttachCommand {
#[arg(help = "Zone to attach to, either the name or the uuid")]
zone: String,
}
impl ZoneAttachCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
let input = StdioConsoleStream::stdin_stream(zone_id.clone(), false).await;
let output = client.attach_zone_console(input).await?.into_inner();
let stdout_handle =
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, true).await });
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
let code = select! {
x = stdout_handle => {
x??;
None
},
x = exit_hook_task => x?
};
StdioConsoleStream::restore_terminal_mode();
std::process::exit(code.unwrap_or(0));
}
}

View File

@ -1,78 +0,0 @@
use anyhow::Result;
use clap::Parser;
use krata::{
events::EventStream,
v1::control::{
control_service_client::ControlServiceClient, watch_events_reply::Event, DestroyZoneRequest,
},
};
use crate::cli::resolve_zone;
use krata::v1::common::ZoneState;
use log::error;
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(about = "Destroy a zone")]
pub struct ZoneDestroyCommand {
#[arg(
short = 'W',
long,
help = "Wait for the destruction of the zone to complete"
)]
wait: bool,
#[arg(help = "Zone to destroy, either the name or the uuid")]
zone: String,
}
impl ZoneDestroyCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
let _ = client
.destroy_zone(Request::new(DestroyZoneRequest {
zone_id: zone_id.clone(),
}))
.await?
.into_inner();
if self.wait {
wait_zone_destroyed(&zone_id, events).await?;
}
Ok(())
}
}
async fn wait_zone_destroyed(id: &str, events: EventStream) -> Result<()> {
let mut stream = events.subscribe();
while let Ok(event) = stream.recv().await {
let Event::ZoneChanged(changed) = event;
let Some(zone) = changed.zone else {
continue;
};
if zone.id != id {
continue;
}
let Some(status) = zone.status else {
continue;
};
if let Some(ref error) = status.error_status {
if status.state() == ZoneState::Failed {
error!("destroy failed: {}", error.message);
std::process::exit(1);
} else {
error!("zone error: {}", error.message);
}
}
if status.state() == ZoneState::Destroyed {
std::process::exit(0);
}
}
Ok(())
}

View File

@ -1,89 +0,0 @@
use std::collections::HashMap;
use anyhow::Result;
use clap::Parser;
use crossterm::tty::IsTty;
use krata::v1::{
common::{TerminalSize, ZoneTaskSpec, ZoneTaskSpecEnvVar},
control::{control_service_client::ControlServiceClient, ExecInsideZoneRequest},
};
use tokio::io::stdin;
use tonic::{transport::Channel, Request};
use crate::console::StdioConsoleStream;
use crate::cli::resolve_zone;
#[derive(Parser)]
#[command(about = "Execute a command inside the zone")]
pub struct ZoneExecCommand {
#[arg(short, long, help = "Environment variables")]
env: Option<Vec<String>>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(short = 't', long, help = "Allocate tty")]
tty: bool,
#[arg(help = "Zone to exec inside, either the name or the uuid")]
zone: String,
#[arg(
allow_hyphen_values = true,
trailing_var_arg = true,
help = "Command to run inside the zone"
)]
command: Vec<String>,
}
impl ZoneExecCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
let should_map_tty = self.tty && stdin().is_tty();
let initial = ExecInsideZoneRequest {
zone_id,
task: Some(ZoneTaskSpec {
environment: env_map(&self.env.unwrap_or_default())
.iter()
.map(|(key, value)| ZoneTaskSpecEnvVar {
key: key.clone(),
value: value.clone(),
})
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
tty: self.tty,
}),
stdin: vec![],
stdin_closed: false,
terminal_size: if should_map_tty {
let size = crossterm::terminal::size().ok();
size.map(|(columns, rows)| TerminalSize {
rows: rows as u32,
columns: columns as u32,
})
} else {
None
},
};
let stream = StdioConsoleStream::input_stream_exec(initial, should_map_tty).await;
let response = client
.exec_inside_zone(Request::new(stream))
.await?
.into_inner();
let code = StdioConsoleStream::exec_output(response, should_map_tty).await?;
std::process::exit(code);
}
}
fn env_map(env: &[String]) -> HashMap<String, String> {
let mut map = HashMap::<String, String>::new();
for item in env {
if let Some((key, value)) = item.split_once('=') {
map.insert(key.to_string(), value.to_string());
}
}
map
}
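// Illustrative sketch, not part of the original file: env_map splits KEY=VALUE strings from
// --env and silently drops entries that contain no '='.
#[cfg(test)]
mod env_map_sketch {
    #[test]
    fn splits_key_value_pairs() {
        let env = vec![
            "FOO=bar".to_string(),
            "EMPTY=".to_string(),
            "MALFORMED".to_string(),
        ];
        let map = super::env_map(&env);
        assert_eq!(map.get("FOO").map(String::as_str), Some("bar"));
        assert_eq!(map.get("EMPTY").map(String::as_str), Some(""));
        assert!(!map.contains_key("MALFORMED"));
    }
}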

View File

@ -1,282 +0,0 @@
use std::collections::HashMap;
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
v1::{
common::{
zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneKernelOptionsSpec,
ZoneOciImageSpec, ZoneResourceSpec, ZoneSpec, ZoneSpecDevice, ZoneState, ZoneTaskSpec,
ZoneTaskSpecEnvVar,
},
control::{
control_service_client::ControlServiceClient, watch_events_reply::Event,
CreateZoneRequest, PullImageRequest,
},
},
};
use log::error;
use tokio::select;
use tonic::{transport::Channel, Request};
use crate::{console::StdioConsoleStream, pull::pull_interactive_progress};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
pub enum LaunchImageFormat {
Squashfs,
Erofs,
}
#[derive(Parser)]
#[command(about = "Launch a new zone")]
pub struct ZoneLaunchCommand {
#[arg(long, default_value = "squashfs", help = "Image format")]
image_format: LaunchImageFormat,
#[arg(long, help = "Overwrite image cache on pull")]
pull_overwrite_cache: bool,
#[arg(long, help = "Update image on pull")]
pull_update: bool,
#[arg(short, long, help = "Name of the zone")]
name: Option<String>,
#[arg(
short = 'C',
long = "max-cpus",
default_value_t = 4,
help = "Maximum vCPUs available for the zone"
)]
max_cpus: u32,
#[arg(
short = 'c',
long = "target-cpus",
default_value_t = 1,
help = "Target vCPUs for the zone to use"
)]
target_cpus: u32,
#[arg(
short = 'M',
long = "max-memory",
default_value_t = 1024,
help = "Maximum memory available to the zone, in megabytes"
)]
max_memory: u64,
#[arg(
short = 'm',
long = "target-memory",
default_value_t = 1024,
help = "Target memory for the zone to use, in megabytes"
)]
target_memory: u64,
#[arg(short = 'D', long = "device", help = "Devices to request for the zone")]
device: Vec<String>,
#[arg(short, long, help = "Environment variables set in the zone")]
env: Option<Vec<String>>,
#[arg(short = 't', long, help = "Allocate tty for task")]
tty: bool,
#[arg(
short,
long,
help = "Attach to the zone after zone starts, implies --wait"
)]
attach: bool,
#[arg(
short = 'W',
long,
help = "Wait for the zone to start, implied by --attach"
)]
wait: bool,
#[arg(short = 'k', long, help = "OCI kernel image for the zone to use")]
kernel: Option<String>,
#[arg(short = 'I', long, help = "OCI initrd image for the zone to use")]
initrd: Option<String>,
#[arg(short = 'w', long, help = "Working directory")]
working_directory: Option<String>,
#[arg(long, help = "Enable verbose logging on the kernel")]
kernel_verbose: bool,
#[arg(long, help = "Additional kernel cmdline options")]
kernel_cmdline_append: Option<String>,
#[arg(help = "Container image for zone to use")]
oci: String,
#[arg(
allow_hyphen_values = true,
trailing_var_arg = true,
help = "Command to run inside the zone"
)]
command: Vec<String>,
}
impl ZoneLaunchCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
let image = self
.pull_image(
&mut client,
&self.oci,
match self.image_format {
LaunchImageFormat::Squashfs => OciImageFormat::Squashfs,
LaunchImageFormat::Erofs => OciImageFormat::Erofs,
},
)
.await?;
let kernel = if let Some(ref kernel) = self.kernel {
let kernel_image = self
.pull_image(&mut client, kernel, OciImageFormat::Tar)
.await?;
Some(kernel_image)
} else {
None
};
let initrd = if let Some(ref initrd) = self.initrd {
let initrd_image = self
.pull_image(&mut client, initrd, OciImageFormat::Tar)
.await?;
Some(initrd_image)
} else {
None
};
let request = CreateZoneRequest {
spec: Some(ZoneSpec {
name: self.name.unwrap_or_default(),
image: Some(image),
kernel,
initrd,
initial_resources: Some(ZoneResourceSpec {
max_memory: self.max_memory,
target_memory: self.target_memory,
max_cpus: self.max_cpus,
target_cpus: self.target_cpus,
}),
task: Some(ZoneTaskSpec {
environment: env_map(&self.env.unwrap_or_default())
.iter()
.map(|(key, value)| ZoneTaskSpecEnvVar {
key: key.clone(),
value: value.clone(),
})
.collect(),
command: self.command,
working_directory: self.working_directory.unwrap_or_default(),
tty: self.tty,
}),
annotations: vec![],
devices: self
.device
.iter()
.map(|name| ZoneSpecDevice { name: name.clone() })
.collect(),
kernel_options: Some(ZoneKernelOptionsSpec {
verbose: self.kernel_verbose,
cmdline_append: self.kernel_cmdline_append.clone().unwrap_or_default(),
}),
}),
};
let response = client
.create_zone(Request::new(request))
.await?
.into_inner();
let id = response.zone_id;
if self.wait || self.attach {
wait_zone_started(&id, events.clone()).await?;
}
let code = if self.attach {
let input = StdioConsoleStream::stdin_stream(id.clone(), true).await;
let output = client.attach_zone_console(input).await?.into_inner();
let stdout_handle =
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, true).await });
let exit_hook_task = StdioConsoleStream::zone_exit_hook(id.clone(), events).await?;
select! {
x = stdout_handle => {
x??;
None
},
x = exit_hook_task => x?
}
} else {
println!("{}", id);
None
};
StdioConsoleStream::restore_terminal_mode();
std::process::exit(code.unwrap_or(0));
}
async fn pull_image(
&self,
client: &mut ControlServiceClient<Channel>,
image: &str,
format: OciImageFormat,
) -> Result<ZoneImageSpec> {
let response = client
.pull_image(PullImageRequest {
image: image.to_string(),
format: format.into(),
overwrite_cache: self.pull_overwrite_cache,
update: self.pull_update,
})
.await?;
let reply = pull_interactive_progress(response.into_inner()).await?;
Ok(ZoneImageSpec {
image: Some(Image::Oci(ZoneOciImageSpec {
digest: reply.digest,
format: reply.format,
})),
})
}
}
async fn wait_zone_started(id: &str, events: EventStream) -> Result<()> {
let mut stream = events.subscribe();
while let Ok(event) = stream.recv().await {
match event {
Event::ZoneChanged(changed) => {
let Some(zone) = changed.zone else {
continue;
};
if zone.id != id {
continue;
}
let Some(status) = zone.status else {
continue;
};
if let Some(ref error) = status.error_status {
if status.state() == ZoneState::Failed {
error!("launch failed: {}", error.message);
std::process::exit(1);
} else {
error!("zone error: {}", error.message);
}
}
if status.state() == ZoneState::Destroyed {
error!("zone destroyed");
std::process::exit(1);
}
if status.state() == ZoneState::Created {
break;
}
}
}
}
Ok(())
}
fn env_map(env: &[String]) -> HashMap<String, String> {
let mut map = HashMap::<String, String>::new();
for item in env {
if let Some((key, value)) = item.split_once('=') {
map.insert(key.to_string(), value.to_string());
}
}
map
}

View File

@ -1,181 +0,0 @@
use anyhow::{anyhow, Result};
use clap::{Parser, ValueEnum};
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
use krata::{
events::EventStream,
v1::{
common::Zone,
control::{
control_service_client::ControlServiceClient, ListZonesRequest, ResolveZoneIdRequest,
},
},
};
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line, zone_state_text};
use krata::v1::common::ZoneState;
use krata::v1::control::GetZoneRequest;
use serde_json::Value;
use tonic::{transport::Channel, Request};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum ZoneListFormat {
Table,
Json,
JsonPretty,
Jsonl,
Yaml,
KeyValue,
Simple,
}
#[derive(Parser)]
#[command(about = "List zone information")]
pub struct ZoneListCommand {
#[arg(short, long, default_value = "table", help = "Output format")]
format: ZoneListFormat,
#[arg(help = "Limit to a single zone, either the name or the uuid")]
zone: Option<String>,
}
impl ZoneListCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
_events: EventStream,
) -> Result<()> {
let mut zones = if let Some(ref zone) = self.zone {
let reply = client
.resolve_zone_id(Request::new(ResolveZoneIdRequest { name: zone.clone() }))
.await?
.into_inner();
if !reply.zone_id.is_empty() {
let reply = client
.get_zone(Request::new(GetZoneRequest {
zone_id: reply.zone_id,
}))
.await?
.into_inner();
if let Some(zone) = reply.zone {
vec![zone]
} else {
return Err(anyhow!("unable to resolve zone '{}'", zone));
}
} else {
return Err(anyhow!("unable to resolve zone '{}'", zone));
}
} else {
client
.list_zones(Request::new(ListZonesRequest {}))
.await?
.into_inner()
.zones
};
zones.sort_by(|a, b| {
a.spec
.as_ref()
.map(|x| x.name.as_str())
.unwrap_or("")
.cmp(b.spec.as_ref().map(|x| x.name.as_str()).unwrap_or(""))
});
match self.format {
ZoneListFormat::Table => {
self.print_zone_table(zones)?;
}
ZoneListFormat::Simple => {
for zone in zones {
println!("{}", zone_simple_line(&zone));
}
}
ZoneListFormat::Json | ZoneListFormat::JsonPretty | ZoneListFormat::Yaml => {
let mut values = Vec::new();
for zone in zones {
let message = proto2dynamic(zone)?;
values.push(serde_json::to_value(message)?);
}
let value = Value::Array(values);
let encoded = if self.format == ZoneListFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == ZoneListFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
ZoneListFormat::Jsonl => {
for zone in zones {
let message = proto2dynamic(zone)?;
println!("{}", serde_json::to_string(&message)?);
}
}
ZoneListFormat::KeyValue => {
self.print_key_value(zones)?;
}
}
Ok(())
}
fn print_zone_table(&self, zones: Vec<Zone>) -> Result<()> {
let mut table = Table::new();
table.load_preset(UTF8_FULL_CONDENSED);
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
table.set_header(vec!["name", "uuid", "state", "ipv4", "ipv6"]);
for zone in zones {
let ipv4 = zone
.status
.as_ref()
.and_then(|x| x.network_status.as_ref())
.map(|x| x.zone_ipv4.as_str())
.unwrap_or("n/a");
let ipv6 = zone
.status
.as_ref()
.and_then(|x| x.network_status.as_ref())
.map(|x| x.zone_ipv6.as_str())
.unwrap_or("n/a");
let Some(spec) = zone.spec else {
continue;
};
let state = zone.status.as_ref().cloned().unwrap_or_default().state();
let status_text = zone_state_text(state);
let status_color = match state {
ZoneState::Destroyed | ZoneState::Failed => Color::Red,
ZoneState::Destroying | ZoneState::Exited | ZoneState::Creating => Color::Yellow,
ZoneState::Created => Color::Green,
_ => Color::Reset,
};
table.add_row(vec![
Cell::new(spec.name),
Cell::new(zone.id),
Cell::new(status_text).fg(status_color),
Cell::new(ipv4.to_string()),
Cell::new(ipv6.to_string()),
]);
}
if table.is_empty() {
if self.zone.is_none() {
println!("no zones have been launched");
}
} else {
println!("{}", table);
}
Ok(())
}
fn print_key_value(&self, zones: Vec<Zone>) -> Result<()> {
for zone in zones {
let kvs = proto2kv(zone)?;
println!("{}", kv2line(kvs),);
}
Ok(())
}
}

View File

@ -1,58 +0,0 @@
use anyhow::Result;
use async_stream::stream;
use clap::Parser;
use krata::{
events::EventStream,
v1::control::{control_service_client::ControlServiceClient, ZoneConsoleRequest},
};
use tokio::select;
use tokio_stream::{pending, StreamExt};
use tonic::transport::Channel;
use crate::console::StdioConsoleStream;
use crate::cli::resolve_zone;
#[derive(Parser)]
#[command(about = "View the logs of a zone")]
pub struct ZoneLogsCommand {
#[arg(short, long, help = "Follow output from the zone")]
follow: bool,
#[arg(help = "Zone to show logs for, either the name or the uuid")]
zone: String,
}
impl ZoneLogsCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
let zone_id_stream = zone_id.clone();
let follow = self.follow;
let input = stream! {
yield ZoneConsoleRequest { zone_id: zone_id_stream, replay_history: true, data: Vec::new() };
if follow {
let mut pending = pending::<ZoneConsoleRequest>();
while let Some(x) = pending.next().await {
yield x;
}
}
};
let output = client.attach_zone_console(input).await?.into_inner();
let stdout_handle =
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, false).await });
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
let code = select! {
x = stdout_handle => {
x??;
None
},
x = exit_hook_task => x?
};
StdioConsoleStream::restore_terminal_mode();
std::process::exit(code.unwrap_or(0));
}
}

View File

@ -1,83 +0,0 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
v1::{
common::ZoneMetricNode,
control::{control_service_client::ControlServiceClient, ReadZoneMetricsRequest},
},
};
use tonic::transport::Channel;
use crate::format::{kv2line, metrics_flat, metrics_tree, proto2dynamic};
use crate::cli::resolve_zone;
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum ZoneMetricsFormat {
Tree,
Json,
JsonPretty,
Yaml,
KeyValue,
}
#[derive(Parser)]
#[command(about = "Read metrics from the zone")]
pub struct ZoneMetricsCommand {
#[arg(short, long, default_value = "tree", help = "Output format")]
format: ZoneMetricsFormat,
#[arg(help = "Zone to read metrics for, either the name or the uuid")]
zone: String,
}
impl ZoneMetricsCommand {
pub async fn run(
self,
mut client: ControlServiceClient<Channel>,
_events: EventStream,
) -> Result<()> {
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
let root = client
.read_zone_metrics(ReadZoneMetricsRequest { zone_id })
.await?
.into_inner()
.root
.unwrap_or_default();
match self.format {
ZoneMetricsFormat::Tree => {
self.print_metrics_tree(root)?;
}
ZoneMetricsFormat::Json | ZoneMetricsFormat::JsonPretty | ZoneMetricsFormat::Yaml => {
let value = serde_json::to_value(proto2dynamic(root)?)?;
let encoded = if self.format == ZoneMetricsFormat::JsonPretty {
serde_json::to_string_pretty(&value)?
} else if self.format == ZoneMetricsFormat::Yaml {
serde_yaml::to_string(&value)?
} else {
serde_json::to_string(&value)?
};
println!("{}", encoded.trim());
}
ZoneMetricsFormat::KeyValue => {
self.print_key_value(root)?;
}
}
Ok(())
}
fn print_metrics_tree(&self, root: ZoneMetricNode) -> Result<()> {
print!("{}", metrics_tree(root));
Ok(())
}
fn print_key_value(&self, metrics: ZoneMetricNode) -> Result<()> {
let kvs = metrics_flat(metrics);
println!("{}", kv2line(kvs));
Ok(())
}
}

View File

@ -1,95 +0,0 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use tonic::transport::Channel;
use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;
use crate::cli::zone::attach::ZoneAttachCommand;
use crate::cli::zone::destroy::ZoneDestroyCommand;
use crate::cli::zone::exec::ZoneExecCommand;
use crate::cli::zone::launch::ZoneLaunchCommand;
use crate::cli::zone::list::ZoneListCommand;
use crate::cli::zone::logs::ZoneLogsCommand;
use crate::cli::zone::metrics::ZoneMetricsCommand;
use crate::cli::zone::resolve::ZoneResolveCommand;
use crate::cli::zone::top::ZoneTopCommand;
use crate::cli::zone::update_resources::ZoneUpdateResourcesCommand;
use crate::cli::zone::watch::ZoneWatchCommand;
pub mod attach;
pub mod destroy;
pub mod exec;
pub mod launch;
pub mod list;
pub mod logs;
pub mod metrics;
pub mod resolve;
pub mod top;
pub mod update_resources;
pub mod watch;
#[derive(Parser)]
#[command(about = "Manage the zones on the isolation engine")]
pub struct ZoneCommand {
#[command(subcommand)]
subcommand: ZoneCommands,
}
impl ZoneCommand {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
self.subcommand.run(client, events).await
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Subcommand)]
pub enum ZoneCommands {
Attach(ZoneAttachCommand),
List(ZoneListCommand),
Launch(ZoneLaunchCommand),
Destroy(ZoneDestroyCommand),
Exec(ZoneExecCommand),
Logs(ZoneLogsCommand),
Metrics(ZoneMetricsCommand),
Resolve(ZoneResolveCommand),
Top(ZoneTopCommand),
Watch(ZoneWatchCommand),
UpdateResources(ZoneUpdateResourcesCommand),
}
impl ZoneCommands {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
match self {
ZoneCommands::Launch(launch) => launch.run(client, events).await,
ZoneCommands::Destroy(destroy) => destroy.run(client, events).await,
ZoneCommands::Attach(attach) => attach.run(client, events).await,
ZoneCommands::Logs(logs) => logs.run(client, events).await,
ZoneCommands::List(list) => list.run(client, events).await,
ZoneCommands::Watch(watch) => watch.run(events).await,
ZoneCommands::Resolve(resolve) => resolve.run(client).await,
ZoneCommands::Metrics(metrics) => metrics.run(client, events).await,
ZoneCommands::Top(top) => top.run(client, events).await,
ZoneCommands::Exec(exec) => exec.run(client).await,
ZoneCommands::UpdateResources(update_resources) => update_resources.run(client).await,
}
}
}

View File

@ -1,29 +0,0 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::control::{control_service_client::ControlServiceClient, ResolveZoneIdRequest};
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(about = "Resolve a zone name to a uuid")]
pub struct ZoneResolveCommand {
#[arg(help = "Zone name")]
zone: String,
}
impl ZoneResolveCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let reply = client
.resolve_zone_id(Request::new(ResolveZoneIdRequest {
name: self.zone.clone(),
}))
.await?
.into_inner();
if !reply.zone_id.is_empty() {
println!("{}", reply.zone_id);
} else {
std::process::exit(1);
}
Ok(())
}
}

View File

@ -1,215 +0,0 @@
use anyhow::Result;
use clap::Parser;
use krata::{events::EventStream, v1::control::control_service_client::ControlServiceClient};
use std::{
io::{self, stdout, Stdout},
time::Duration,
};
use tokio::select;
use tokio_stream::StreamExt;
use tonic::transport::Channel;
use crossterm::{
event::{Event, KeyCode, KeyEvent, KeyEventKind},
execute,
terminal::*,
};
use ratatui::{
prelude::*,
symbols::border,
widgets::{
block::{Position, Title},
Block, Borders, Row, Table, TableState,
},
};
use crate::{
format::zone_state_text,
metrics::{
lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
},
};
#[derive(Parser)]
#[command(about = "Dashboard for running zones")]
pub struct ZoneTopCommand {}
pub type Tui = Terminal<CrosstermBackend<Stdout>>;
impl ZoneTopCommand {
pub async fn run(
self,
client: ControlServiceClient<Channel>,
events: EventStream,
) -> Result<()> {
let collector = MultiMetricCollector::new(client, events, Duration::from_millis(200))?;
let collector = collector.launch().await?;
let mut tui = ZoneTopCommand::init()?;
let mut app = ZoneTopApp {
metrics: MultiMetricState { zones: vec![] },
exit: false,
table: TableState::new(),
};
app.run(collector, &mut tui).await?;
ZoneTopCommand::restore()?;
Ok(())
}
pub fn init() -> io::Result<Tui> {
execute!(stdout(), EnterAlternateScreen)?;
enable_raw_mode()?;
Terminal::new(CrosstermBackend::new(stdout()))
}
pub fn restore() -> io::Result<()> {
execute!(stdout(), LeaveAlternateScreen)?;
disable_raw_mode()?;
Ok(())
}
}
pub struct ZoneTopApp {
table: TableState,
metrics: MultiMetricState,
exit: bool,
}
impl ZoneTopApp {
pub async fn run(
&mut self,
mut collector: MultiMetricCollectorHandle,
terminal: &mut Tui,
) -> Result<()> {
let mut events = crossterm::event::EventStream::new();
while !self.exit {
terminal.draw(|frame| self.render_frame(frame))?;
select! {
x = collector.receiver.recv() => match x {
Some(state) => {
self.metrics = state;
},
None => {
break;
}
},
x = events.next() => match x {
Some(event) => {
let event = event?;
self.handle_event(event)?;
},
None => {
break;
}
}
}
}
Ok(())
}
fn render_frame(&mut self, frame: &mut Frame) {
frame.render_widget(self, frame.area());
}
fn handle_event(&mut self, event: Event) -> io::Result<()> {
match event {
Event::Key(key_event) if key_event.kind == KeyEventKind::Press => {
self.handle_key_event(key_event)
}
_ => {}
};
Ok(())
}
fn exit(&mut self) {
self.exit = true;
}
fn handle_key_event(&mut self, key_event: KeyEvent) {
if let KeyCode::Char('q') = key_event.code {
self.exit()
}
}
}
impl Widget for &mut ZoneTopApp {
fn render(self, area: Rect, buf: &mut Buffer) {
let title = Title::from(" krata isolation engine ".bold());
let instructions = Title::from(vec![" Quit ".into(), "<Q> ".blue().bold()]);
let block = Block::default()
.title(title.alignment(Alignment::Center))
.title(
instructions
.alignment(Alignment::Center)
.position(Position::Bottom),
)
.borders(Borders::ALL)
.border_set(border::THICK);
let mut rows = vec![];
for ms in &self.metrics.zones {
let Some(ref spec) = ms.zone.spec else {
continue;
};
let Some(ref status) = ms.zone.status else {
continue;
};
let memory_total = ms
.root
.as_ref()
.and_then(|root| lookup_metric_value(root, "system/memory/total"));
let memory_used = ms
.root
.as_ref()
.and_then(|root| lookup_metric_value(root, "system/memory/used"));
let memory_free = ms
.root
.as_ref()
.and_then(|root| lookup_metric_value(root, "system/memory/free"));
let row = Row::new(vec![
spec.name.clone(),
ms.zone.id.clone(),
zone_state_text(status.state()),
memory_total.unwrap_or_default(),
memory_used.unwrap_or_default(),
memory_free.unwrap_or_default(),
]);
rows.push(row);
}
let widths = [
Constraint::Min(8),
Constraint::Min(8),
Constraint::Min(8),
Constraint::Min(8),
Constraint::Min(8),
Constraint::Min(8),
];
let table = Table::new(rows, widths)
.header(
Row::new(vec![
"name",
"id",
"status",
"total memory",
"used memory",
"free memory",
])
.style(Style::new().bold())
.bottom_margin(1),
)
.column_spacing(1)
.block(block);
StatefulWidget::render(table, area, buf, &mut self.table);
}
}

View File

@ -1,93 +0,0 @@
use anyhow::Result;
use clap::Parser;
use krata::v1::{
common::ZoneResourceSpec,
control::{control_service_client::ControlServiceClient, UpdateZoneResourcesRequest},
};
use crate::cli::resolve_zone;
use krata::v1::control::GetZoneRequest;
use tonic::{transport::Channel, Request};
#[derive(Parser)]
#[command(about = "Update the available resources to a zone")]
pub struct ZoneUpdateResourcesCommand {
#[arg(help = "Zone to update resources of, either the name or the uuid")]
zone: String,
#[arg(
short = 'C',
long = "max-cpus",
default_value_t = 0,
help = "Maximum vCPUs available to the zone (0 means previous value)"
)]
max_cpus: u32,
#[arg(
short = 'c',
long = "target-cpus",
default_value_t = 0,
help = "Target vCPUs for the zone to use (0 means previous value)"
)]
target_cpus: u32,
#[arg(
short = 'M',
long = "max-memory",
default_value_t = 0,
help = "Maximum memory available to the zone, in megabytes (0 means previous value)"
)]
max_memory: u64,
#[arg(
short = 'm',
long = "target-memory",
default_value_t = 0,
help = "Target memory for the zone to use, in megabytes (0 means previous value)"
)]
target_memory: u64,
}
impl ZoneUpdateResourcesCommand {
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
let zone_id = resolve_zone(&mut client, &self.zone).await?;
let zone = client
.get_zone(GetZoneRequest { zone_id })
.await?
.into_inner()
.zone
.unwrap_or_default();
let active_resources = zone
.status
.clone()
.unwrap_or_default()
.resource_status
.unwrap_or_default()
.active_resources
.unwrap_or_default();
client
.update_zone_resources(Request::new(UpdateZoneResourcesRequest {
zone_id: zone.id.clone(),
resources: Some(ZoneResourceSpec {
max_memory: if self.max_memory == 0 {
active_resources.max_memory
} else {
self.max_memory
},
target_memory: if self.target_memory == 0 {
active_resources.target_memory
} else {
self.target_memory
},
max_cpus: if self.max_cpus == 0 {
active_resources.max_cpus
} else {
self.max_cpus
},
target_cpus: if self.target_cpus == 0 {
active_resources.target_cpus
} else {
self.target_cpus
},
}),
}))
.await?;
Ok(())
}
}

View File

@ -1,63 +0,0 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use krata::{
events::EventStream,
v1::{common::Zone, control::watch_events_reply::Event},
};
use prost_reflect::ReflectMessage;
use serde_json::Value;
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line};
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum ZoneWatchFormat {
Simple,
Json,
KeyValue,
}
#[derive(Parser)]
#[command(about = "Watch for zone changes")]
pub struct ZoneWatchCommand {
#[arg(short, long, default_value = "simple", help = "Output format")]
format: ZoneWatchFormat,
}
impl ZoneWatchCommand {
pub async fn run(self, events: EventStream) -> Result<()> {
let mut stream = events.subscribe();
loop {
let event = stream.recv().await?;
let Event::ZoneChanged(changed) = event;
let zone = changed.zone.clone();
self.print_event("zone.changed", changed, zone)?;
}
}
fn print_event(&self, typ: &str, event: impl ReflectMessage, zone: Option<Zone>) -> Result<()> {
match self.format {
ZoneWatchFormat::Simple => {
if let Some(zone) = zone {
println!("{}", zone_simple_line(&zone));
}
}
ZoneWatchFormat::Json => {
let message = proto2dynamic(event)?;
let mut value = serde_json::to_value(&message)?;
if let Value::Object(ref mut map) = value {
map.insert("event.type".to_string(), Value::String(typ.to_string()));
}
println!("{}", serde_json::to_string(&value)?);
}
ZoneWatchFormat::KeyValue => {
let mut map = proto2kv(event)?;
map.insert("event.type".to_string(), typ.to_string());
println!("{}", kv2line(map),);
}
}
Ok(())
}
}

View File

@ -1,263 +0,0 @@
use anyhow::Result;
use async_stream::stream;
use crossterm::{
terminal::{disable_raw_mode, enable_raw_mode, is_raw_mode_enabled},
tty::IsTty,
};
use krata::v1::common::ZoneState;
use krata::{
events::EventStream,
v1::common::TerminalSize,
v1::control::{
watch_events_reply::Event, ExecInsideZoneReply, ExecInsideZoneRequest, ZoneConsoleReply,
ZoneConsoleRequest,
},
};
use log::debug;
use tokio::{
io::{stderr, stdin, stdout, AsyncReadExt, AsyncWriteExt},
select,
task::JoinHandle,
};
use tokio_stream::{Stream, StreamExt};
use tonic::Streaming;
pub struct StdioConsoleStream;
enum ExecStdinSelect {
DataRead(std::io::Result<usize>),
TerminalResize,
}
impl StdioConsoleStream {
pub async fn stdin_stream(
zone: String,
replay_history: bool,
) -> impl Stream<Item = ZoneConsoleRequest> {
let mut stdin = stdin();
stream! {
yield ZoneConsoleRequest { zone_id: zone, replay_history, data: vec![] };
let mut buffer = vec![0u8; 60];
loop {
let size = match stdin.read(&mut buffer).await {
Ok(size) => size,
Err(error) => {
debug!("failed to read stdin: {}", error);
break;
}
};
let data = buffer[0..size].to_vec();
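// 0x1d is Ctrl-] (as in telnet): treat it as the detach sequence and stop streaming stdin.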
if size == 1 && buffer[0] == 0x1d {
break;
}
yield ZoneConsoleRequest { zone_id: String::default(), replay_history, data };
}
}
}
#[cfg(unix)]
pub async fn input_stream_exec(
initial: ExecInsideZoneRequest,
tty: bool,
) -> impl Stream<Item = ExecInsideZoneRequest> {
let mut stdin = stdin();
stream! {
yield initial;
let mut buffer = vec![0u8; 60];
let mut terminal_size_change = if tty {
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::window_change()).ok()
} else {
None
};
let mut stdin_closed = false;
loop {
let selected = if let Some(ref mut terminal_size_change) = terminal_size_change {
if stdin_closed {
select! {
_ = terminal_size_change.recv() => ExecStdinSelect::TerminalResize,
}
} else {
select! {
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
_ = terminal_size_change.recv() => ExecStdinSelect::TerminalResize,
}
}
} else {
select! {
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
}
};
match selected {
ExecStdinSelect::DataRead(result) => {
match result {
Ok(size) => {
let stdin = buffer[0..size].to_vec();
if size == 1 && buffer[0] == 0x1d {
break;
}
stdin_closed = size == 0;
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: None, stdin, stdin_closed, };
},
Err(error) => {
debug!("failed to read stdin: {}", error);
break;
}
}
},
ExecStdinSelect::TerminalResize => {
if let Ok((columns, rows)) = crossterm::terminal::size() {
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: Some(TerminalSize {
rows: rows as u32,
columns: columns as u32,
}), stdin: vec![], stdin_closed: false, };
}
}
}
}
}
}
#[cfg(not(unix))]
pub async fn input_stream_exec(
initial: ExecInsideZoneRequest,
_tty: bool,
) -> impl Stream<Item = ExecInsideZoneRequest> {
let mut stdin = stdin();
stream! {
yield initial;
let mut buffer = vec![0u8; 60];
let mut stdin_closed = false;
loop {
let selected = select! {
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
};
match selected {
ExecStdinSelect::DataRead(result) => {
match result {
Ok(size) => {
let stdin = buffer[0..size].to_vec();
if size == 1 && buffer[0] == 0x1d {
break;
}
stdin_closed = size == 0;
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: None, stdin, stdin_closed, };
},
Err(error) => {
debug!("failed to read stdin: {}", error);
break;
}
}
},
_ => {
continue;
}
}
}
}
}
pub async fn stdout(mut stream: Streaming<ZoneConsoleReply>, raw: bool) -> Result<()> {
if raw && stdin().is_tty() {
enable_raw_mode()?;
StdioConsoleStream::register_terminal_restore_hook()?;
}
let mut stdout = stdout();
while let Some(reply) = stream.next().await {
let reply = reply?;
if reply.data.is_empty() {
continue;
}
stdout.write_all(&reply.data).await?;
stdout.flush().await?;
}
Ok(())
}
pub async fn exec_output(mut stream: Streaming<ExecInsideZoneReply>, raw: bool) -> Result<i32> {
if raw {
enable_raw_mode()?;
StdioConsoleStream::register_terminal_restore_hook()?;
}
let mut stdout = stdout();
let mut stderr = stderr();
while let Some(reply) = stream.next().await {
let reply = reply?;
if !reply.stdout.is_empty() {
stdout.write_all(&reply.stdout).await?;
stdout.flush().await?;
}
if !reply.stderr.is_empty() {
stderr.write_all(&reply.stderr).await?;
stderr.flush().await?;
}
if reply.exited {
return if reply.error.is_empty() {
Ok(reply.exit_code)
} else {
StdioConsoleStream::restore_terminal_mode();
stderr
.write_all(format!("Error: exec failed: {}\n", reply.error).as_bytes())
.await?;
stderr.flush().await?;
Ok(-1)
};
}
}
Ok(-1)
}
pub async fn zone_exit_hook(
id: String,
events: EventStream,
) -> Result<JoinHandle<Option<i32>>> {
Ok(tokio::task::spawn(async move {
let mut stream = events.subscribe();
while let Ok(event) = stream.recv().await {
let Event::ZoneChanged(changed) = event;
let Some(zone) = changed.zone else {
continue;
};
let Some(status) = zone.status else {
continue;
};
if zone.id != id {
continue;
}
if let Some(exit_status) = status.exit_status {
return Some(exit_status.code);
}
let state = status.state();
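// No exit status was reported; if the zone is going away anyway, surface a distinct non-zero code.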
if state == ZoneState::Destroying || state == ZoneState::Destroyed {
return Some(10);
}
}
None
}))
}
fn register_terminal_restore_hook() -> Result<()> {
if stdin().is_tty() {
ctrlc::set_handler(move || {
StdioConsoleStream::restore_terminal_mode();
})?;
}
Ok(())
}
pub fn restore_terminal_mode() {
if is_raw_mode_enabled().unwrap_or(false) {
let _ = disable_raw_mode();
}
}
}

View File

@ -1,164 +0,0 @@
use std::{collections::HashMap, time::Duration};
use anyhow::Result;
use fancy_duration::FancyDuration;
use human_bytes::human_bytes;
use prost_reflect::{DynamicMessage, ReflectMessage};
use prost_types::Value;
use termtree::Tree;
use krata::v1::common::{Zone, ZoneMetricFormat, ZoneMetricNode, ZoneState};
pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
Ok(DynamicMessage::decode(
proto.descriptor(),
proto.encode_to_vec().as_slice(),
)?)
}
pub fn value2kv(value: serde_json::Value) -> Result<HashMap<String, String>> {
let mut map = HashMap::new();
fn crawl(prefix: String, map: &mut HashMap<String, String>, value: serde_json::Value) {
fn dot(prefix: &str, next: String) -> String {
if prefix.is_empty() {
next.to_string()
} else {
format!("{}.{}", prefix, next)
}
}
match value {
serde_json::Value::Null => {
map.insert(prefix, "null".to_string());
}
serde_json::Value::String(value) => {
map.insert(prefix, value);
}
serde_json::Value::Bool(value) => {
map.insert(prefix, value.to_string());
}
serde_json::Value::Number(value) => {
map.insert(prefix, value.to_string());
}
serde_json::Value::Array(value) => {
for (i, item) in value.into_iter().enumerate() {
let next = dot(&prefix, i.to_string());
crawl(next, map, item);
}
}
serde_json::Value::Object(value) => {
for (key, item) in value {
let next = dot(&prefix, key);
crawl(next, map, item);
}
}
}
}
crawl("".to_string(), &mut map, value);
Ok(map)
}
pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
let message = proto2dynamic(proto)?;
let value = serde_json::to_value(message)?;
value2kv(value)
}
pub fn kv2line(map: HashMap<String, String>) -> String {
map.iter()
.map(|(k, v)| format!("{}=\"{}\"", k, v.replace('"', "\\\"")))
.collect::<Vec<_>>()
.join(" ")
}
pub fn zone_state_text(status: ZoneState) -> String {
match status {
ZoneState::Creating => "creating",
ZoneState::Created => "created",
ZoneState::Destroying => "destroying",
ZoneState::Destroyed => "destroyed",
ZoneState::Exited => "exited",
ZoneState::Failed => "failed",
_ => "unknown",
}
.to_string()
}
pub fn zone_simple_line(zone: &Zone) -> String {
let state = zone_state_text(
zone.status
.as_ref()
.map(|x| x.state())
.unwrap_or(ZoneState::Unknown),
);
let name = zone.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
let network_status = zone.status.as_ref().and_then(|x| x.network_status.as_ref());
let ipv4 = network_status.map(|x| x.zone_ipv4.as_str()).unwrap_or("");
let ipv6 = network_status.map(|x| x.zone_ipv6.as_str()).unwrap_or("");
format!("{}\t{}\t{}\t{}\t{}", zone.id, state, name, ipv4, ipv6)
}
fn metrics_value_string(value: Value) -> String {
proto2dynamic(value)
.map(|x| serde_json::to_string(&x).ok())
.ok()
.flatten()
.unwrap_or_default()
}
fn metrics_value_numeric(value: Value) -> f64 {
let string = metrics_value_string(value);
string.parse::<f64>().ok().unwrap_or(f64::NAN)
}
pub fn metrics_value_pretty(value: Value, format: ZoneMetricFormat) -> String {
match format {
ZoneMetricFormat::Bytes => human_bytes(metrics_value_numeric(value)),
ZoneMetricFormat::Integer => (metrics_value_numeric(value) as u64).to_string(),
ZoneMetricFormat::DurationSeconds => {
FancyDuration(Duration::from_secs_f64(metrics_value_numeric(value))).to_string()
}
_ => metrics_value_string(value),
}
}
fn metrics_flat_internal(prefix: &str, node: ZoneMetricNode, map: &mut HashMap<String, String>) {
if let Some(value) = node.value {
map.insert(prefix.to_string(), metrics_value_string(value));
}
for child in node.children {
let path = if prefix.is_empty() {
child.name.to_string()
} else {
format!("{}.{}", prefix, child.name)
};
metrics_flat_internal(&path, child, map);
}
}
pub fn metrics_flat(root: ZoneMetricNode) -> HashMap<String, String> {
let mut map = HashMap::new();
metrics_flat_internal("", root, &mut map);
map
}
pub fn metrics_tree(node: ZoneMetricNode) -> Tree<String> {
let mut name = node.name.to_string();
let format = node.format();
if let Some(value) = node.value {
let value_string = metrics_value_pretty(value, format);
name.push_str(&format!(": {}", value_string));
}
let mut tree = Tree::new(name);
for child in node.children {
tree.push(metrics_tree(child));
}
tree
}
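// Illustrative sketch, not part of the original file: value2kv flattens nested JSON into
// dotted keys and kv2line renders them as space-separated key="value" pairs (in HashMap
// iteration order, which is unspecified).
#[cfg(test)]
mod format_sketch {
    use super::*;

    #[test]
    fn flattens_and_formats() {
        let value = serde_json::json!({ "spec": { "name": "example", "cpus": 2 } });
        let kv = value2kv(value).unwrap();
        assert_eq!(kv.get("spec.name").map(String::as_str), Some("example"));
        assert_eq!(kv.get("spec.cpus").map(String::as_str), Some("2"));
        assert!(kv2line(kv).contains("spec.name=\"example\""));
    }
}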

View File

@ -1,5 +0,0 @@
pub mod cli;
pub mod console;
pub mod format;
pub mod metrics;
pub mod pull;

View File

@ -1,158 +0,0 @@
use crate::format::metrics_value_pretty;
use anyhow::Result;
use krata::v1::common::ZoneState;
use krata::{
events::EventStream,
v1::{
common::{Zone, ZoneMetricNode},
control::{
control_service_client::ControlServiceClient, watch_events_reply::Event,
ListZonesRequest, ReadZoneMetricsRequest,
},
},
};
use log::error;
use std::time::Duration;
use tokio::{
select,
sync::mpsc::{channel, Receiver, Sender},
task::JoinHandle,
time::{sleep, timeout},
};
use tonic::transport::Channel;
pub struct MetricState {
pub zone: Zone,
pub root: Option<ZoneMetricNode>,
}
pub struct MultiMetricState {
pub zones: Vec<MetricState>,
}
pub struct MultiMetricCollector {
client: ControlServiceClient<Channel>,
events: EventStream,
period: Duration,
}
pub struct MultiMetricCollectorHandle {
pub receiver: Receiver<MultiMetricState>,
task: JoinHandle<()>,
}
impl Drop for MultiMetricCollectorHandle {
fn drop(&mut self) {
self.task.abort();
}
}
impl MultiMetricCollector {
pub fn new(
client: ControlServiceClient<Channel>,
events: EventStream,
period: Duration,
) -> Result<MultiMetricCollector> {
Ok(MultiMetricCollector {
client,
events,
period,
})
}
pub async fn launch(mut self) -> Result<MultiMetricCollectorHandle> {
let (sender, receiver) = channel::<MultiMetricState>(100);
let task = tokio::task::spawn(async move {
if let Err(error) = self.process(sender).await {
error!("failed to process multi metric collector: {}", error);
}
});
Ok(MultiMetricCollectorHandle { receiver, task })
}
pub async fn process(&mut self, sender: Sender<MultiMetricState>) -> Result<()> {
let mut events = self.events.subscribe();
let mut zones: Vec<Zone> = self
.client
.list_zones(ListZonesRequest {})
.await?
.into_inner()
.zones;
loop {
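// Either refresh the zone list from an event (no collection yet) or collect metrics once the period elapses.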
let collect = select! {
x = events.recv() => match x {
Ok(event) => {
let Event::ZoneChanged(changed) = event;
let Some(zone) = changed.zone else {
continue;
};
let Some(ref status) = zone.status else {
continue;
};
zones.retain(|x| x.id != zone.id);
if status.state() != ZoneState::Destroying {
zones.push(zone);
}
false
},
Err(error) => {
return Err(error.into());
}
},
_ = sleep(self.period) => {
true
}
};
if !collect {
continue;
}
let mut metrics = Vec::new();
for zone in &zones {
let Some(ref status) = zone.status else {
continue;
};
if status.state() != ZoneState::Created {
continue;
}
let root = timeout(
Duration::from_secs(5),
self.client.read_zone_metrics(ReadZoneMetricsRequest {
zone_id: zone.id.clone(),
}),
)
.await
.ok()
.and_then(|x| x.ok())
.map(|x| x.into_inner())
.and_then(|x| x.root);
metrics.push(MetricState {
zone: zone.clone(),
root,
});
}
sender.send(MultiMetricState { zones: metrics }).await?;
}
}
}
pub fn lookup<'a>(node: &'a ZoneMetricNode, path: &str) -> Option<&'a ZoneMetricNode> {
let Some((head, rest)) = path.split_once('/') else {
return node.children.iter().find(|x| x.name == path);
};
let next = node.children.iter().find(|x| x.name == head)?;
lookup(next, rest)
}
pub fn lookup_metric_value(node: &ZoneMetricNode, path: &str) -> Option<String> {
lookup(node, path).and_then(|x| {
x.value
.as_ref()
.map(|v| metrics_value_pretty(v.clone(), x.format()))
})
}
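// Illustrative sketch, not part of the original file: `lookup` walks '/'-separated paths such
// as "system/memory/total" through the metric tree. This assumes ZoneMetricNode implements
// Default, as prost-generated messages do.
#[cfg(test)]
mod lookup_sketch {
    use super::*;
    use krata::v1::common::ZoneMetricNode;

    #[test]
    fn resolves_slash_separated_paths() {
        let total = ZoneMetricNode {
            name: "total".to_string(),
            ..Default::default()
        };
        let memory = ZoneMetricNode {
            name: "memory".to_string(),
            children: vec![total],
            ..Default::default()
        };
        let system = ZoneMetricNode {
            name: "system".to_string(),
            children: vec![memory],
            ..Default::default()
        };
        let root = ZoneMetricNode {
            children: vec![system],
            ..Default::default()
        };
        assert!(lookup(&root, "system/memory/total").is_some());
        assert!(lookup(&root, "system/swap/total").is_none());
    }
}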

View File

@ -1,268 +0,0 @@
use std::{
collections::{hash_map::Entry, HashMap},
time::Duration,
};
use anyhow::{anyhow, Result};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use krata::v1::control::{
image_progress_indication::Indication, ImageProgressIndication, ImageProgressLayerPhase,
ImageProgressPhase, PullImageReply,
};
use tokio_stream::StreamExt;
use tonic::Streaming;
const SPINNER_STRINGS: &[&str] = &[
"[= ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ = ]",
"[ =]",
"[====================]",
];
fn progress_bar_for_indication(indication: &ImageProgressIndication) -> Option<ProgressBar> {
match indication.indication.as_ref() {
Some(Indication::Hidden(_)) | None => None,
Some(Indication::Bar(indic)) => {
let bar = ProgressBar::new(indic.total);
bar.enable_steady_tick(Duration::from_millis(100));
Some(bar)
}
Some(Indication::Spinner(_)) => {
let bar = ProgressBar::new_spinner();
bar.enable_steady_tick(Duration::from_millis(100));
Some(bar)
}
Some(Indication::Completed(indic)) => {
let bar = ProgressBar::new_spinner();
bar.enable_steady_tick(Duration::from_millis(100));
if !indic.message.is_empty() {
bar.finish_with_message(indic.message.clone());
} else {
bar.finish();
}
Some(bar)
}
}
}
fn configure_for_indication(
bar: &mut ProgressBar,
multi_progress: &mut MultiProgress,
indication: &ImageProgressIndication,
top_phase: Option<ImageProgressPhase>,
layer_phase: Option<ImageProgressLayerPhase>,
layer_id: Option<&str>,
) {
let prefix = if let Some(phase) = top_phase {
match phase {
ImageProgressPhase::Unknown => "unknown",
ImageProgressPhase::Started => "started",
ImageProgressPhase::Resolving => "resolving",
ImageProgressPhase::Resolved => "resolved",
ImageProgressPhase::ConfigDownload => "downloading",
ImageProgressPhase::LayerDownload => "downloading",
ImageProgressPhase::Assemble => "assembling",
ImageProgressPhase::Pack => "packing",
ImageProgressPhase::Complete => "complete",
}
} else if let Some(phase) = layer_phase {
match phase {
ImageProgressLayerPhase::Unknown => "unknown",
ImageProgressLayerPhase::Waiting => "waiting",
ImageProgressLayerPhase::Downloading => "downloading",
ImageProgressLayerPhase::Downloaded => "downloaded",
ImageProgressLayerPhase::Extracting => "extracting",
ImageProgressLayerPhase::Extracted => "extracted",
}
} else {
""
};
let prefix = prefix.to_string();
let id = if let Some(layer_id) = layer_id {
let hash = if let Some((_, hash)) = layer_id.split_once(':') {
hash
} else {
"unknown"
};
let small_hash = if hash.len() > 10 { &hash[0..10] } else { hash };
Some(format!("{:width$}", small_hash, width = 10))
} else {
None
};
let prefix = if let Some(id) = id {
format!("{} {:width$}", id, prefix, width = 11)
} else {
format!(" {:width$}", prefix, width = 11)
};
match indication.indication.as_ref() {
Some(Indication::Hidden(_)) | None => {
multi_progress.remove(bar);
return;
}
Some(Indication::Bar(indic)) => {
if indic.is_bytes {
bar.set_style(ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) eta: {eta}").unwrap().progress_chars("=>-"));
} else {
bar.set_style(
ProgressStyle::with_template(
"{prefix} [{bar:20} {msg} {human_pos}/{human_len} ({per_sec}/sec)",
)
.unwrap()
.progress_chars("=>-"),
);
}
bar.set_message(indic.message.clone());
bar.set_position(indic.current);
bar.set_length(indic.total);
}
Some(Indication::Spinner(indic)) => {
bar.set_style(
ProgressStyle::with_template("{prefix} {spinner} {msg}")
.unwrap()
.tick_strings(SPINNER_STRINGS),
);
bar.set_message(indic.message.clone());
}
Some(Indication::Completed(indic)) => {
if bar.is_finished() {
return;
}
bar.disable_steady_tick();
bar.set_message(indic.message.clone());
if indic.total != 0 {
bar.set_position(indic.total);
bar.set_length(indic.total);
}
if bar.style().get_tick_str(0).contains('=') {
bar.set_style(
ProgressStyle::with_template("{prefix} {spinner} {msg}")
.unwrap()
.tick_strings(SPINNER_STRINGS),
);
bar.finish_with_message(indic.message.clone());
} else if indic.is_bytes {
bar.set_style(
ProgressStyle::with_template("{prefix} [{bar:20}] {msg} {binary_total_bytes}")
.unwrap()
.progress_chars("=>-"),
);
} else {
bar.set_style(
ProgressStyle::with_template("{prefix} [{bar:20}] {msg}")
.unwrap()
.progress_chars("=>-"),
);
}
bar.tick();
bar.enable_steady_tick(Duration::from_millis(100));
}
};
bar.set_prefix(prefix);
bar.tick();
}
pub async fn pull_interactive_progress(
mut stream: Streaming<PullImageReply>,
) -> Result<PullImageReply> {
let mut multi_progress = MultiProgress::new();
multi_progress.set_move_cursor(false);
let mut progresses = HashMap::new();
while let Some(reply) = stream.next().await {
let reply = match reply {
Ok(reply) => reply,
Err(error) => {
multi_progress.clear()?;
return Err(error.into());
}
};
if reply.progress.is_none() && !reply.digest.is_empty() {
multi_progress.clear()?;
return Ok(reply);
}
let Some(oci) = reply.progress else {
continue;
};
for layer in &oci.layers {
let Some(ref indication) = layer.indication else {
continue;
};
let bar = match progresses.entry(layer.id.clone()) {
Entry::Occupied(entry) => Some(entry.into_mut()),
Entry::Vacant(entry) => {
if let Some(bar) = progress_bar_for_indication(indication) {
multi_progress.add(bar.clone());
Some(entry.insert(bar))
} else {
None
}
}
};
if let Some(bar) = bar {
configure_for_indication(
bar,
&mut multi_progress,
indication,
None,
Some(layer.phase()),
Some(&layer.id),
);
}
}
if let Some(ref indication) = oci.indication {
let bar = match progresses.entry("root".to_string()) {
Entry::Occupied(entry) => Some(entry.into_mut()),
Entry::Vacant(entry) => {
if let Some(bar) = progress_bar_for_indication(indication) {
multi_progress.add(bar.clone());
Some(entry.insert(bar))
} else {
None
}
}
};
if let Some(bar) = bar {
configure_for_indication(
bar,
&mut multi_progress,
indication,
Some(oci.phase()),
None,
None,
);
}
}
}
multi_progress.clear()?;
Err(anyhow!("never received final reply for image pull"))
}

View File

@ -1,44 +0,0 @@
[package]
name = "krata-daemon"
description = "Daemon for the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
resolver = "2"
[dependencies]
krata-advmac = { workspace = true }
anyhow = { workspace = true }
async-stream = { workspace = true }
async-trait = { workspace = true }
bytes = { workspace = true }
circular-buffer = { workspace = true }
clap = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.20" }
krata-oci = { path = "../oci", version = "^0.0.20" }
krata-runtime = { path = "../runtime", version = "^0.0.20" }
log = { workspace = true }
prost = { workspace = true }
redb = { workspace = true }
scopeguard = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
signal-hook = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
toml = { workspace = true }
krata-tokio-tar = { workspace = true }
tonic = { workspace = true, features = ["tls"] }
uuid = { workspace = true }
[lib]
name = "kratad"
[[bin]]
name = "kratad"
path = "bin/daemon.rs"

View File

@ -1,39 +0,0 @@
use std::{
net::{SocketAddr, TcpStream},
str::FromStr,
sync::{atomic::AtomicBool, Arc},
};
use anyhow::Result;
use clap::Parser;
use env_logger::fmt::Target;
use log::LevelFilter;
use kratad::command::DaemonCommand;
#[tokio::main(flavor = "multi_thread", worker_threads = 10)]
async fn main() -> Result<()> {
let mut builder = env_logger::Builder::new();
builder
.filter_level(LevelFilter::Info)
.parse_default_env()
.filter(Some("backhand::filesystem::writer"), LevelFilter::Warn);
if let Ok(f_addr) = std::env::var("KRATA_FLUENT_ADDR") {
let target = SocketAddr::from_str(f_addr.as_str())?;
builder.target(Target::Pipe(Box::new(TcpStream::connect(target)?)));
}
builder.init();
mask_sighup()?;
let command = DaemonCommand::parse();
command.run().await
}
fn mask_sighup() -> Result<()> {
let flag = Arc::new(AtomicBool::new(false));
signal_hook::flag::register(signal_hook::consts::SIGHUP, flag)?;
Ok(())
}

View File

@ -1,36 +0,0 @@
use anyhow::Result;
use clap::{CommandFactory, Parser};
use krata::dial::ControlDialAddress;
use std::str::FromStr;
use crate::Daemon;
#[derive(Parser)]
#[command(version, about = "krata isolation engine daemon")]
pub struct DaemonCommand {
#[arg(
short,
long,
default_value = "unix:///var/lib/krata/daemon.socket",
help = "Listen address"
)]
listen: String,
#[arg(short, long, default_value = "/var/lib/krata", help = "Storage path")]
store: String,
}
impl DaemonCommand {
pub async fn run(self) -> Result<()> {
let addr = ControlDialAddress::from_str(&self.listen)?;
let mut daemon = Daemon::new(self.store.clone()).await?;
daemon.listen(addr).await?;
Ok(())
}
pub fn version() -> String {
DaemonCommand::command()
.get_version()
.unwrap_or("unknown")
.to_string()
}
}

View File

@ -1,124 +0,0 @@
use std::{collections::HashMap, path::Path};
use anyhow::Result;
use serde::{Deserialize, Serialize};
use tokio::fs;
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonConfig {
#[serde(default)]
pub oci: OciConfig,
#[serde(default)]
pub pci: DaemonPciConfig,
#[serde(default = "default_network")]
pub network: DaemonNetworkConfig,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct OciConfig {
#[serde(default)]
pub seed: Option<String>,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonPciConfig {
#[serde(default)]
pub devices: HashMap<String, DaemonPciDeviceConfig>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct DaemonPciDeviceConfig {
pub locations: Vec<String>,
#[serde(default)]
pub permissive: bool,
#[serde(default)]
#[serde(rename = "msi-translate")]
pub msi_translate: bool,
#[serde(default)]
#[serde(rename = "power-management")]
pub power_management: bool,
#[serde(default)]
#[serde(rename = "rdm-reserve-policy")]
pub rdm_reserve_policy: DaemonPciDeviceRdmReservePolicy,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub enum DaemonPciDeviceRdmReservePolicy {
#[default]
#[serde(rename = "strict")]
Strict,
#[serde(rename = "relaxed")]
Relaxed,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonNetworkConfig {
#[serde(default = "default_network_nameservers")]
pub nameservers: Vec<String>,
#[serde(default = "default_network_ipv4")]
pub ipv4: DaemonIpv4NetworkConfig,
#[serde(default = "default_network_ipv6")]
pub ipv6: DaemonIpv6NetworkConfig,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonIpv4NetworkConfig {
#[serde(default = "default_network_ipv4_subnet")]
pub subnet: String,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonIpv6NetworkConfig {
#[serde(default = "default_network_ipv6_subnet")]
pub subnet: String,
}
fn default_network() -> DaemonNetworkConfig {
DaemonNetworkConfig {
nameservers: default_network_nameservers(),
ipv4: default_network_ipv4(),
ipv6: default_network_ipv6(),
}
}
fn default_network_nameservers() -> Vec<String> {
vec![
"1.1.1.1".to_string(),
"1.0.0.1".to_string(),
"2606:4700:4700::1111".to_string(),
"2606:4700:4700::1001".to_string(),
]
}
fn default_network_ipv4() -> DaemonIpv4NetworkConfig {
DaemonIpv4NetworkConfig {
subnet: default_network_ipv4_subnet(),
}
}
fn default_network_ipv4_subnet() -> String {
"10.75.0.0/16".to_string()
}
fn default_network_ipv6() -> DaemonIpv6NetworkConfig {
DaemonIpv6NetworkConfig {
subnet: default_network_ipv6_subnet(),
}
}
fn default_network_ipv6_subnet() -> String {
"fdd4:1476:6c7e::/48".to_string()
}
impl DaemonConfig {
pub async fn load(path: &Path) -> Result<DaemonConfig> {
if !path.exists() {
let config: DaemonConfig = toml::from_str("")?;
let content = toml::to_string_pretty(&config)?;
fs::write(&path, content).await?;
}
let content = fs::read_to_string(path).await?;
let config: DaemonConfig = toml::from_str(&content)?;
Ok(config)
}
}
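
A minimal sketch (not part of the file above) of a config.toml that matches these structs and their serde renames; the gpu0 device name and PCI location are hypothetical values chosen for illustration.

fn example_config() -> Result<DaemonConfig> {
    // Every section is optional thanks to the serde defaults defined above.
    let text = r#"
        [oci]
        seed = "/var/lib/krata/seed.tar"

        [pci.devices.gpu0]
        locations = ["0000:01:00.0"]
        permissive = true
        msi-translate = true
        power-management = false
        rdm-reserve-policy = "relaxed"

        [network]
        nameservers = ["1.1.1.1", "1.0.0.1"]

        [network.ipv4]
        subnet = "10.75.0.0/16"

        [network.ipv6]
        subnet = "fdd4:1476:6c7e::/48"
    "#;
    Ok(toml::from_str(text)?)
}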

View File

@ -1,164 +0,0 @@
use std::{collections::HashMap, sync::Arc};
use anyhow::{anyhow, Result};
use circular_buffer::CircularBuffer;
use kratart::channel::ChannelService;
use log::error;
use tokio::{
sync::{
mpsc::{error::TrySendError, Receiver, Sender},
Mutex,
},
task::JoinHandle,
};
use uuid::Uuid;
use crate::zlt::ZoneLookupTable;
const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
type ConsoleBuffer = Box<RawConsoleBuffer>;
type ListenerMap = Arc<Mutex<HashMap<u32, Vec<Sender<Vec<u8>>>>>>;
type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
#[derive(Clone)]
pub struct DaemonConsoleHandle {
zlt: ZoneLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
sender: Sender<(u32, Vec<u8>)>,
task: Arc<JoinHandle<()>>,
}
#[derive(Clone)]
pub struct DaemonConsoleAttachHandle {
pub initial: Vec<u8>,
listeners: ListenerMap,
sender: Sender<(u32, Vec<u8>)>,
domid: u32,
}
impl DaemonConsoleAttachHandle {
pub async fn unsubscribe(&self) -> Result<()> {
let mut guard = self.listeners.lock().await;
let _ = guard.remove(&self.domid);
Ok(())
}
pub async fn send(&self, data: Vec<u8>) -> Result<()> {
Ok(self.sender.send((self.domid, data)).await?)
}
}
impl DaemonConsoleHandle {
pub async fn attach(
&self,
uuid: Uuid,
sender: Sender<Vec<u8>>,
) -> Result<DaemonConsoleAttachHandle> {
let Some(domid) = self.zlt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
let buffers = self.buffers.lock().await;
let buffer = buffers.get(&domid).map(|x| x.to_vec()).unwrap_or_default();
drop(buffers);
let mut listeners = self.listeners.lock().await;
let senders = listeners.entry(domid).or_default();
senders.push(sender);
Ok(DaemonConsoleAttachHandle {
initial: buffer,
sender: self.sender.clone(),
listeners: self.listeners.clone(),
domid,
})
}
}
impl Drop for DaemonConsoleHandle {
fn drop(&mut self) {
if Arc::strong_count(&self.task) <= 1 {
self.task.abort();
}
}
}
pub struct DaemonConsole {
zlt: ZoneLookupTable,
listeners: ListenerMap,
buffers: BufferMap,
receiver: Receiver<(u32, Option<Vec<u8>>)>,
sender: Sender<(u32, Vec<u8>)>,
task: JoinHandle<()>,
}
impl DaemonConsole {
pub async fn new(zlt: ZoneLookupTable) -> Result<DaemonConsole> {
let (service, sender, receiver) =
ChannelService::new("krata-console".to_string(), Some(0)).await?;
let task = service.launch().await?;
let listeners = Arc::new(Mutex::new(HashMap::new()));
let buffers = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonConsole {
zlt,
listeners,
buffers,
receiver,
sender,
task,
})
}
pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
let zlt = self.zlt.clone();
let listeners = self.listeners.clone();
let buffers = self.buffers.clone();
let sender = self.sender.clone();
let task = tokio::task::spawn(async move {
if let Err(error) = self.process().await {
error!("failed to process console: {}", error);
}
});
Ok(DaemonConsoleHandle {
zlt,
listeners,
buffers,
sender,
task: Arc::new(task),
})
}
async fn process(&mut self) -> Result<()> {
loop {
let Some((domid, data)) = self.receiver.recv().await else {
break;
};
let mut buffers = self.buffers.lock().await;
if let Some(data) = data {
let buffer = buffers
.entry(domid)
.or_insert_with_key(|_| RawConsoleBuffer::boxed());
buffer.extend_from_slice(&data);
drop(buffers);
let mut listeners = self.listeners.lock().await;
if let Some(senders) = listeners.get_mut(&domid) {
senders.retain(|sender| {
!matches!(sender.try_send(data.to_vec()), Err(TrySendError::Closed(_)))
});
}
} else {
buffers.remove(&domid);
let mut listeners = self.listeners.lock().await;
listeners.remove(&domid);
}
}
Ok(())
}
}
impl Drop for DaemonConsole {
fn drop(&mut self) {
self.task.abort();
}
}
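
A minimal usage sketch for the attach API above, assuming a DaemonConsoleHandle and a zone UUID are already available; illustration only, not part of the file.

async fn tail_zone_console(handle: DaemonConsoleHandle, zone: Uuid) -> Result<()> {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<Vec<u8>>(100);
    let attached = handle.attach(zone, tx).await?;
    // Replay whatever the circular buffer currently holds, then stream new output.
    print!("{}", String::from_utf8_lossy(&attached.initial));
    while let Some(chunk) = rx.recv().await {
        print!("{}", String::from_utf8_lossy(&chunk));
    }
    attached.unsubscribe().await?;
    Ok(())
}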

View File

@ -1,84 +0,0 @@
use std::pin::Pin;
use std::str::FromStr;
use anyhow::{anyhow, Result};
use async_stream::try_stream;
use tokio::select;
use tokio::sync::mpsc::channel;
use tokio_stream::{Stream, StreamExt};
use tonic::{Status, Streaming};
use uuid::Uuid;
use krata::v1::control::{ZoneConsoleReply, ZoneConsoleRequest};
use crate::console::DaemonConsoleHandle;
use crate::control::ApiError;
enum ConsoleDataSelect {
Read(Option<Vec<u8>>),
Write(Option<Result<ZoneConsoleRequest, Status>>),
}
pub struct AttachZoneConsoleRpc {
console: DaemonConsoleHandle,
}
impl AttachZoneConsoleRpc {
pub fn new(console: DaemonConsoleHandle) -> Self {
Self { console }
}
pub async fn process(
self,
mut input: Streaming<ZoneConsoleRequest>,
) -> Result<Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>>
{
let Some(request) = input.next().await else {
return Err(anyhow!("expected to have at least one request"));
};
let request = request?;
let uuid = Uuid::from_str(&request.zone_id)?;
let (sender, mut receiver) = channel(100);
let console = self
.console
.attach(uuid, sender)
.await
.map_err(|error| anyhow!("failed to attach to console: {}", error))?;
let output = try_stream! {
if request.replay_history {
yield ZoneConsoleReply { data: console.initial.clone(), };
}
loop {
let what = select! {
x = receiver.recv() => ConsoleDataSelect::Read(x),
x = input.next() => ConsoleDataSelect::Write(x),
};
match what {
ConsoleDataSelect::Read(Some(data)) => {
yield ZoneConsoleReply { data, };
},
ConsoleDataSelect::Read(None) => {
break;
}
ConsoleDataSelect::Write(Some(request)) => {
let request = request?;
if !request.data.is_empty() {
console.send(request.data).await.map_err(|error| ApiError {
message: error.to_string(),
})?;
}
},
ConsoleDataSelect::Write(None) => {
break;
}
}
}
};
Ok(Box::pin(output))
}
}

View File

@ -1,56 +0,0 @@
use crate::db::zone::ZoneStore;
use crate::zlt::ZoneLookupTable;
use anyhow::{anyhow, Result};
use krata::v1::common::{Zone, ZoneState, ZoneStatus};
use krata::v1::control::{CreateZoneReply, CreateZoneRequest};
use tokio::sync::mpsc::Sender;
use uuid::Uuid;
pub struct CreateZoneRpc {
zones: ZoneStore,
zlt: ZoneLookupTable,
zone_reconciler_notify: Sender<Uuid>,
}
impl CreateZoneRpc {
pub fn new(
zones: ZoneStore,
zlt: ZoneLookupTable,
zone_reconciler_notify: Sender<Uuid>,
) -> Self {
Self {
zones,
zlt,
zone_reconciler_notify,
}
}
pub async fn process(self, request: CreateZoneRequest) -> Result<CreateZoneReply> {
let Some(spec) = request.spec else {
return Err(anyhow!("zone spec not provided"));
};
let uuid = Uuid::new_v4();
self.zones
.update(
uuid,
Zone {
id: uuid.to_string(),
status: Some(ZoneStatus {
state: ZoneState::Creating.into(),
network_status: None,
exit_status: None,
error_status: None,
resource_status: None,
host: self.zlt.host_uuid().to_string(),
domid: u32::MAX,
}),
spec: Some(spec),
},
)
.await?;
self.zone_reconciler_notify.send(uuid).await?;
Ok(CreateZoneReply {
zone_id: uuid.to_string(),
})
}
}

View File

@ -1,42 +0,0 @@
use std::str::FromStr;
use anyhow::{anyhow, Result};
use tokio::sync::mpsc::Sender;
use uuid::Uuid;
use krata::v1::common::ZoneState;
use krata::v1::control::{DestroyZoneReply, DestroyZoneRequest};
use crate::db::zone::ZoneStore;
pub struct DestroyZoneRpc {
zones: ZoneStore,
zone_reconciler_notify: Sender<Uuid>,
}
impl DestroyZoneRpc {
pub fn new(zones: ZoneStore, zone_reconciler_notify: Sender<Uuid>) -> Self {
Self {
zones,
zone_reconciler_notify,
}
}
pub async fn process(self, request: DestroyZoneRequest) -> Result<DestroyZoneReply> {
let uuid = Uuid::from_str(&request.zone_id)?;
let Some(mut zone) = self.zones.read(uuid).await? else {
return Err(anyhow!("zone not found"));
};
zone.status = Some(zone.status.as_mut().cloned().unwrap_or_default());
if zone.status.as_ref().unwrap().state() == ZoneState::Destroyed {
return Err(anyhow!("zone already destroyed"));
}
zone.status.as_mut().unwrap().state = ZoneState::Destroying.into();
self.zones.update(uuid, zone).await?;
self.zone_reconciler_notify.send(uuid).await?;
Ok(DestroyZoneReply {})
}
}

View File

@ -1,133 +0,0 @@
use std::pin::Pin;
use std::str::FromStr;
use anyhow::{anyhow, Result};
use async_stream::try_stream;
use tokio::select;
use tokio_stream::{Stream, StreamExt};
use tonic::{Status, Streaming};
use uuid::Uuid;
use krata::idm::internal::Request;
use krata::{
idm::internal::{
exec_stream_request_update::Update, request::Request as IdmRequestType,
response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
ExecStreamRequestStdin, ExecStreamRequestTerminalSize, ExecStreamRequestUpdate,
Request as IdmRequest,
},
v1::control::{ExecInsideZoneReply, ExecInsideZoneRequest},
};
use crate::control::ApiError;
use crate::idm::DaemonIdmHandle;
pub struct ExecInsideZoneRpc {
idm: DaemonIdmHandle,
}
impl ExecInsideZoneRpc {
pub fn new(idm: DaemonIdmHandle) -> Self {
Self { idm }
}
pub async fn process(
self,
mut input: Streaming<ExecInsideZoneRequest>,
) -> Result<Pin<Box<dyn Stream<Item = Result<ExecInsideZoneReply, Status>> + Send + 'static>>>
{
let Some(request) = input.next().await else {
return Err(anyhow!("expected to have at least one request"));
};
let request = request?;
let Some(task) = request.task else {
return Err(anyhow!("task is missing"));
};
let uuid = Uuid::from_str(&request.zone_id)?;
let idm = self.idm.client(uuid).await?;
let idm_request = Request {
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
update: Some(Update::Start(ExecStreamRequestStart {
environment: task
.environment
.into_iter()
.map(|x| ExecEnvVar {
key: x.key,
value: x.value,
})
.collect(),
command: task.command,
working_directory: task.working_directory,
tty: task.tty,
terminal_size: request.terminal_size.map(|size| {
ExecStreamRequestTerminalSize {
rows: size.rows,
columns: size.columns,
}
}),
})),
})),
};
let output = try_stream! {
let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
message: x.to_string(),
})?;
loop {
select! {
x = input.next() => if let Some(update) = x {
let update: Result<ExecInsideZoneRequest, Status> = update.map_err(|error| ApiError {
message: error.to_string()
}.into());
if let Ok(update) = update {
if !update.stdin.is_empty() {
let _ = handle.update(IdmRequest {
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
update: Some(Update::Stdin(ExecStreamRequestStdin {
data: update.stdin,
closed: update.stdin_closed,
})),
}))}).await;
}
if let Some(ref terminal_size) = update.terminal_size {
let _ = handle.update(IdmRequest {
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
update: Some(Update::TerminalResize(ExecStreamRequestTerminalSize {
rows: terminal_size.rows,
columns: terminal_size.columns,
})),
}))}).await;
}
}
},
x = handle.receiver.recv() => match x {
Some(response) => {
let Some(IdmResponseType::ExecStream(update)) = response.response else {
break;
};
let reply = ExecInsideZoneReply {
exited: update.exited,
error: update.error,
exit_code: update.exit_code,
stdout: update.stdout,
stderr: update.stderr,
};
yield reply;
},
None => {
break;
}
}
}
}
};
Ok(Box::pin(output))
}
}

View File

@ -1,33 +0,0 @@
use anyhow::Result;
use krata::v1::control::{GetHostCpuTopologyReply, GetHostCpuTopologyRequest, HostCpuTopologyInfo};
use kratart::Runtime;
pub struct GetHostCpuTopologyRpc {
runtime: Runtime,
}
impl GetHostCpuTopologyRpc {
pub fn new(runtime: Runtime) -> Self {
Self { runtime }
}
pub async fn process(
self,
_request: GetHostCpuTopologyRequest,
) -> Result<GetHostCpuTopologyReply> {
let power = self.runtime.power_management_context().await?;
let cpu_topology = power.cpu_topology().await?;
let mut cpus = vec![];
for cpu in cpu_topology {
cpus.push(HostCpuTopologyInfo {
core: cpu.core,
socket: cpu.socket,
node: cpu.node,
thread: cpu.thread,
class: cpu.class as i32,
})
}
Ok(GetHostCpuTopologyReply { cpus })
}
}

View File

@ -1,37 +0,0 @@
use crate::command::DaemonCommand;
use crate::network::assignment::NetworkAssignment;
use crate::zlt::ZoneLookupTable;
use anyhow::Result;
use krata::v1::control::{GetHostStatusReply, GetHostStatusRequest};
pub struct GetHostStatusRpc {
network: NetworkAssignment,
zlt: ZoneLookupTable,
}
impl GetHostStatusRpc {
pub fn new(ip: NetworkAssignment, zlt: ZoneLookupTable) -> Self {
Self { network: ip, zlt }
}
pub async fn process(self, _request: GetHostStatusRequest) -> Result<GetHostStatusReply> {
let host_reservation = self.network.retrieve(self.zlt.host_uuid()).await?;
Ok(GetHostStatusReply {
host_domid: self.zlt.host_domid(),
host_uuid: self.zlt.host_uuid().to_string(),
krata_version: DaemonCommand::version(),
host_ipv4: host_reservation
.as_ref()
.map(|x| format!("{}/{}", x.ipv4, x.ipv4_prefix))
.unwrap_or_default(),
host_ipv6: host_reservation
.as_ref()
.map(|x| format!("{}/{}", x.ipv6, x.ipv6_prefix))
.unwrap_or_default(),
host_mac: host_reservation
.as_ref()
.map(|x| x.mac.to_string().to_lowercase().replace('-', ":"))
.unwrap_or_default(),
})
}
}

View File

@ -1,24 +0,0 @@
use std::str::FromStr;
use anyhow::Result;
use uuid::Uuid;
use krata::v1::control::{GetZoneReply, GetZoneRequest};
use crate::db::zone::ZoneStore;
pub struct GetZoneRpc {
zones: ZoneStore,
}
impl GetZoneRpc {
pub fn new(zones: ZoneStore) -> Self {
Self { zones }
}
pub async fn process(self, request: GetZoneRequest) -> Result<GetZoneReply> {
let mut zones = self.zones.list().await?;
let zone = zones.remove(&Uuid::from_str(&request.zone_id)?);
Ok(GetZoneReply { zone })
}
}

View File

@ -1,28 +0,0 @@
use anyhow::Result;
use krata::v1::control::{DeviceInfo, ListDevicesReply, ListDevicesRequest};
use crate::devices::DaemonDeviceManager;
pub struct ListDevicesRpc {
devices: DaemonDeviceManager,
}
impl ListDevicesRpc {
pub fn new(devices: DaemonDeviceManager) -> Self {
Self { devices }
}
pub async fn process(self, _request: ListDevicesRequest) -> Result<ListDevicesReply> {
let mut devices = Vec::new();
let state = self.devices.copy().await?;
for (name, state) in state {
devices.push(DeviceInfo {
name,
claimed: state.owner.is_some(),
owner: state.owner.map(|x| x.to_string()).unwrap_or_default(),
});
}
Ok(ListDevicesReply { devices })
}
}

View File

@ -1,28 +0,0 @@
use anyhow::Result;
use krata::v1::{
common::NetworkReservation,
control::{ListNetworkReservationsReply, ListNetworkReservationsRequest},
};
use crate::network::assignment::NetworkAssignment;
pub struct ListNetworkReservationsRpc {
network: NetworkAssignment,
}
impl ListNetworkReservationsRpc {
pub fn new(network: NetworkAssignment) -> Self {
Self { network }
}
pub async fn process(
self,
_request: ListNetworkReservationsRequest,
) -> Result<ListNetworkReservationsReply> {
let state = self.network.read_reservations().await?;
let reservations: Vec<NetworkReservation> =
state.into_values().map(|x| x.into()).collect::<Vec<_>>();
Ok(ListNetworkReservationsReply { reservations })
}
}

View File

@ -1,21 +0,0 @@
use anyhow::Result;
use krata::v1::common::Zone;
use krata::v1::control::{ListZonesReply, ListZonesRequest};
use crate::db::zone::ZoneStore;
pub struct ListZonesRpc {
zones: ZoneStore,
}
impl ListZonesRpc {
pub fn new(zones: ZoneStore) -> Self {
Self { zones }
}
pub async fn process(self, _request: ListZonesRequest) -> Result<ListZonesReply> {
let zones = self.zones.list().await?;
let zones = zones.into_values().collect::<Vec<Zone>>();
Ok(ListZonesReply { zones })
}
}

View File

@ -1,365 +0,0 @@
use std::pin::Pin;
use anyhow::Error;
use futures::Stream;
use list_network_reservations::ListNetworkReservationsRpc;
use tokio::sync::mpsc::Sender;
use tonic::{Request, Response, Status, Streaming};
use uuid::Uuid;
use krata::v1::control::{
control_service_server::ControlService, CreateZoneReply, CreateZoneRequest, DestroyZoneReply,
DestroyZoneRequest, ExecInsideZoneReply, ExecInsideZoneRequest, GetHostCpuTopologyReply,
GetHostCpuTopologyRequest, GetHostStatusReply, GetHostStatusRequest, ListDevicesReply,
ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
ReadHypervisorConsoleReply, ReadHypervisorConsoleRequest, ReadZoneMetricsReply,
ReadZoneMetricsRequest, ResolveZoneIdReply, ResolveZoneIdRequest, SnoopIdmReply,
SnoopIdmRequest, UpdateZoneResourcesReply, UpdateZoneResourcesRequest, WatchEventsReply,
WatchEventsRequest, ZoneConsoleReply, ZoneConsoleRequest,
};
use krata::v1::control::{
GetZoneReply, GetZoneRequest, ListNetworkReservationsReply, ListNetworkReservationsRequest,
SetHostPowerManagementPolicyReply, SetHostPowerManagementPolicyRequest,
};
use krataoci::packer::service::OciPackerService;
use kratart::Runtime;
use crate::control::attach_zone_console::AttachZoneConsoleRpc;
use crate::control::create_zone::CreateZoneRpc;
use crate::control::destroy_zone::DestroyZoneRpc;
use crate::control::exec_inside_zone::ExecInsideZoneRpc;
use crate::control::get_host_cpu_topology::GetHostCpuTopologyRpc;
use crate::control::get_host_status::GetHostStatusRpc;
use crate::control::get_zone::GetZoneRpc;
use crate::control::list_devices::ListDevicesRpc;
use crate::control::list_zones::ListZonesRpc;
use crate::control::pull_image::PullImageRpc;
use crate::control::read_hypervisor_console::ReadHypervisorConsoleRpc;
use crate::control::read_zone_metrics::ReadZoneMetricsRpc;
use crate::control::resolve_zone_id::ResolveZoneIdRpc;
use crate::control::set_host_power_management_policy::SetHostPowerManagementPolicyRpc;
use crate::control::snoop_idm::SnoopIdmRpc;
use crate::control::update_zone_resources::UpdateZoneResourcesRpc;
use crate::control::watch_events::WatchEventsRpc;
use crate::db::zone::ZoneStore;
use crate::network::assignment::NetworkAssignment;
use crate::{
console::DaemonConsoleHandle, devices::DaemonDeviceManager, event::DaemonEventContext,
idm::DaemonIdmHandle, zlt::ZoneLookupTable,
};
pub mod attach_zone_console;
pub mod create_zone;
pub mod destroy_zone;
pub mod exec_inside_zone;
pub mod get_host_cpu_topology;
pub mod get_host_status;
pub mod get_zone;
pub mod list_devices;
pub mod list_network_reservations;
pub mod list_zones;
pub mod pull_image;
pub mod read_hypervisor_console;
pub mod read_zone_metrics;
pub mod resolve_zone_id;
pub mod set_host_power_management_policy;
pub mod snoop_idm;
pub mod update_zone_resources;
pub mod watch_events;
pub struct ApiError {
message: String,
}
impl From<Error> for ApiError {
fn from(value: Error) -> Self {
ApiError {
message: value.to_string(),
}
}
}
impl From<ApiError> for Status {
fn from(value: ApiError) -> Self {
Status::unknown(value.message)
}
}
#[derive(Clone)]
pub struct DaemonControlService {
zlt: ZoneLookupTable,
devices: DaemonDeviceManager,
events: DaemonEventContext,
console: DaemonConsoleHandle,
idm: DaemonIdmHandle,
zones: ZoneStore,
network: NetworkAssignment,
zone_reconciler_notify: Sender<Uuid>,
packer: OciPackerService,
runtime: Runtime,
}
impl DaemonControlService {
#[allow(clippy::too_many_arguments)]
pub fn new(
zlt: ZoneLookupTable,
devices: DaemonDeviceManager,
events: DaemonEventContext,
console: DaemonConsoleHandle,
idm: DaemonIdmHandle,
zones: ZoneStore,
network: NetworkAssignment,
zone_reconciler_notify: Sender<Uuid>,
packer: OciPackerService,
runtime: Runtime,
) -> Self {
Self {
zlt,
devices,
events,
console,
idm,
zones,
network,
zone_reconciler_notify,
packer,
runtime,
}
}
}
#[tonic::async_trait]
impl ControlService for DaemonControlService {
async fn get_host_status(
&self,
request: Request<GetHostStatusRequest>,
) -> Result<Response<GetHostStatusReply>, Status> {
let request = request.into_inner();
adapt(
GetHostStatusRpc::new(self.network.clone(), self.zlt.clone())
.process(request)
.await,
)
}
type SnoopIdmStream =
Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;
async fn snoop_idm(
&self,
request: Request<SnoopIdmRequest>,
) -> Result<Response<Self::SnoopIdmStream>, Status> {
let request = request.into_inner();
adapt(
SnoopIdmRpc::new(self.idm.clone(), self.zlt.clone())
.process(request)
.await,
)
}
async fn get_host_cpu_topology(
&self,
request: Request<GetHostCpuTopologyRequest>,
) -> Result<Response<GetHostCpuTopologyReply>, Status> {
let request = request.into_inner();
adapt(
GetHostCpuTopologyRpc::new(self.runtime.clone())
.process(request)
.await,
)
}
async fn set_host_power_management_policy(
&self,
request: Request<SetHostPowerManagementPolicyRequest>,
) -> Result<Response<SetHostPowerManagementPolicyReply>, Status> {
let request = request.into_inner();
adapt(
SetHostPowerManagementPolicyRpc::new(self.runtime.clone())
.process(request)
.await,
)
}
async fn list_devices(
&self,
request: Request<ListDevicesRequest>,
) -> Result<Response<ListDevicesReply>, Status> {
let request = request.into_inner();
adapt(
ListDevicesRpc::new(self.devices.clone())
.process(request)
.await,
)
}
async fn list_network_reservations(
&self,
request: Request<ListNetworkReservationsRequest>,
) -> Result<Response<ListNetworkReservationsReply>, Status> {
let request = request.into_inner();
adapt(
ListNetworkReservationsRpc::new(self.network.clone())
.process(request)
.await,
)
}
type PullImageStream =
Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
async fn pull_image(
&self,
request: Request<PullImageRequest>,
) -> Result<Response<Self::PullImageStream>, Status> {
let request = request.into_inner();
adapt(
PullImageRpc::new(self.packer.clone())
.process(request)
.await,
)
}
async fn create_zone(
&self,
request: Request<CreateZoneRequest>,
) -> Result<Response<CreateZoneReply>, Status> {
let request = request.into_inner();
adapt(
CreateZoneRpc::new(
self.zones.clone(),
self.zlt.clone(),
self.zone_reconciler_notify.clone(),
)
.process(request)
.await,
)
}
async fn destroy_zone(
&self,
request: Request<DestroyZoneRequest>,
) -> Result<Response<DestroyZoneReply>, Status> {
let request = request.into_inner();
adapt(
DestroyZoneRpc::new(self.zones.clone(), self.zone_reconciler_notify.clone())
.process(request)
.await,
)
}
async fn resolve_zone_id(
&self,
request: Request<ResolveZoneIdRequest>,
) -> Result<Response<ResolveZoneIdReply>, Status> {
let request = request.into_inner();
adapt(
ResolveZoneIdRpc::new(self.zones.clone())
.process(request)
.await,
)
}
async fn get_zone(
&self,
request: Request<GetZoneRequest>,
) -> Result<Response<GetZoneReply>, Status> {
let request = request.into_inner();
adapt(GetZoneRpc::new(self.zones.clone()).process(request).await)
}
async fn update_zone_resources(
&self,
request: Request<UpdateZoneResourcesRequest>,
) -> Result<Response<UpdateZoneResourcesReply>, Status> {
let request = request.into_inner();
adapt(
UpdateZoneResourcesRpc::new(self.runtime.clone(), self.zones.clone())
.process(request)
.await,
)
}
async fn list_zones(
&self,
request: Request<ListZonesRequest>,
) -> Result<Response<ListZonesReply>, Status> {
let request = request.into_inner();
adapt(ListZonesRpc::new(self.zones.clone()).process(request).await)
}
type AttachZoneConsoleStream =
Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;
async fn attach_zone_console(
&self,
request: Request<Streaming<ZoneConsoleRequest>>,
) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
let input = request.into_inner();
adapt(
AttachZoneConsoleRpc::new(self.console.clone())
.process(input)
.await,
)
}
type ExecInsideZoneStream =
Pin<Box<dyn Stream<Item = Result<ExecInsideZoneReply, Status>> + Send + 'static>>;
async fn exec_inside_zone(
&self,
request: Request<Streaming<ExecInsideZoneRequest>>,
) -> Result<Response<Self::ExecInsideZoneStream>, Status> {
let input = request.into_inner();
adapt(
ExecInsideZoneRpc::new(self.idm.clone())
.process(input)
.await,
)
}
async fn read_zone_metrics(
&self,
request: Request<ReadZoneMetricsRequest>,
) -> Result<Response<ReadZoneMetricsReply>, Status> {
let request = request.into_inner();
adapt(
ReadZoneMetricsRpc::new(self.idm.clone())
.process(request)
.await,
)
}
type WatchEventsStream =
Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>;
async fn watch_events(
&self,
request: Request<WatchEventsRequest>,
) -> Result<Response<Self::WatchEventsStream>, Status> {
let request = request.into_inner();
adapt(
WatchEventsRpc::new(self.events.clone())
.process(request)
.await,
)
}
async fn read_hypervisor_console(
&self,
request: Request<ReadHypervisorConsoleRequest>,
) -> Result<Response<ReadHypervisorConsoleReply>, Status> {
let request = request.into_inner();
adapt(
ReadHypervisorConsoleRpc::new(self.runtime.clone())
.process(request)
.await,
)
}
}
fn adapt<T>(result: anyhow::Result<T>) -> Result<Response<T>, Status> {
result
.map(Response::new)
.map_err(|error| Status::unknown(error.to_string()))
}

View File

@ -1,100 +0,0 @@
use crate::control::ApiError;
use crate::oci::convert_oci_progress;
use anyhow::Result;
use async_stream::try_stream;
use krata::v1::common::OciImageFormat;
use krata::v1::control::{PullImageReply, PullImageRequest};
use krataoci::name::ImageName;
use krataoci::packer::service::OciPackerService;
use krataoci::packer::{OciPackedFormat, OciPackedImage};
use krataoci::progress::{OciProgress, OciProgressContext};
use std::pin::Pin;
use tokio::select;
use tokio::task::JoinError;
use tokio_stream::Stream;
use tonic::Status;
enum PullImageSelect {
Progress(Option<OciProgress>),
Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
}
pub struct PullImageRpc {
packer: OciPackerService,
}
impl PullImageRpc {
pub fn new(packer: OciPackerService) -> Self {
Self { packer }
}
pub async fn process(
self,
request: PullImageRequest,
) -> Result<Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>> {
let name = ImageName::parse(&request.image)?;
let format = match request.format() {
OciImageFormat::Unknown => OciPackedFormat::Squashfs,
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
OciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Tar => OciPackedFormat::Tar,
};
let (context, mut receiver) = OciProgressContext::create();
let our_packer = self.packer;
let output = try_stream! {
let mut task = tokio::task::spawn(async move {
our_packer.request(name, format, request.overwrite_cache, request.update, context).await
});
let abort_handle = task.abort_handle();
let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
handle.abort();
});
loop {
let what = select! {
x = receiver.changed() => match x {
Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
Err(_) => PullImageSelect::Progress(None),
},
x = &mut task => PullImageSelect::Completed(x),
};
match what {
PullImageSelect::Progress(Some(progress)) => {
let reply = PullImageReply {
progress: Some(convert_oci_progress(progress)),
digest: String::new(),
format: OciImageFormat::Unknown.into(),
};
yield reply;
},
PullImageSelect::Completed(result) => {
let result = result.map_err(|err| ApiError {
message: err.to_string(),
})?;
let packed = result.map_err(|err| ApiError {
message: err.to_string(),
})?;
let reply = PullImageReply {
progress: None,
digest: packed.digest,
format: match packed.format {
OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
OciPackedFormat::Tar => OciImageFormat::Tar.into(),
},
};
yield reply;
break;
},
_ => {
continue;
}
}
}
};
Ok(Box::pin(output))
}
}

View File

@ -1,23 +0,0 @@
use anyhow::Result;
use krata::v1::control::{ReadHypervisorConsoleReply, ReadHypervisorConsoleRequest};
use kratart::Runtime;
pub struct ReadHypervisorConsoleRpc {
runtime: Runtime,
}
impl ReadHypervisorConsoleRpc {
pub fn new(runtime: Runtime) -> Self {
Self { runtime }
}
pub async fn process(
self,
_: ReadHypervisorConsoleRequest,
) -> Result<ReadHypervisorConsoleReply> {
let data = self.runtime.read_hypervisor_console(false).await?;
Ok(ReadHypervisorConsoleReply {
data: data.to_string(),
})
}
}

View File

@ -1,40 +0,0 @@
use std::str::FromStr;
use anyhow::Result;
use uuid::Uuid;
use krata::idm::internal::MetricsRequest;
use krata::idm::internal::{
request::Request as IdmRequestType, response::Response as IdmResponseType,
Request as IdmRequest,
};
use krata::v1::control::{ReadZoneMetricsReply, ReadZoneMetricsRequest};
use crate::idm::DaemonIdmHandle;
use crate::metrics::idm_metric_to_api;
pub struct ReadZoneMetricsRpc {
idm: DaemonIdmHandle,
}
impl ReadZoneMetricsRpc {
pub fn new(idm: DaemonIdmHandle) -> Self {
Self { idm }
}
pub async fn process(self, request: ReadZoneMetricsRequest) -> Result<ReadZoneMetricsReply> {
let uuid = Uuid::from_str(&request.zone_id)?;
let client = self.idm.client(uuid).await?;
let response = client
.send(IdmRequest {
request: Some(IdmRequestType::Metrics(MetricsRequest {})),
})
.await?;
let mut reply = ReadZoneMetricsReply::default();
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
reply.root = metrics.root.map(idm_metric_to_api);
}
Ok(reply)
}
}

View File

@ -1,30 +0,0 @@
use anyhow::Result;
use krata::v1::common::Zone;
use krata::v1::control::{ResolveZoneIdReply, ResolveZoneIdRequest};
use crate::db::zone::ZoneStore;
pub struct ResolveZoneIdRpc {
zones: ZoneStore,
}
impl ResolveZoneIdRpc {
pub fn new(zones: ZoneStore) -> Self {
Self { zones }
}
pub async fn process(self, request: ResolveZoneIdRequest) -> Result<ResolveZoneIdReply> {
let zones = self.zones.list().await?;
let zones = zones
.into_values()
.filter(|x| {
let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
(!request.name.is_empty() && comparison_spec.name == request.name)
|| x.id == request.name
})
.collect::<Vec<Zone>>();
Ok(ResolveZoneIdReply {
zone_id: zones.first().cloned().map(|x| x.id).unwrap_or_default(),
})
}
}

View File

@ -1,25 +0,0 @@
use anyhow::Result;
use krata::v1::control::{SetHostPowerManagementPolicyReply, SetHostPowerManagementPolicyRequest};
use kratart::Runtime;
pub struct SetHostPowerManagementPolicyRpc {
runtime: Runtime,
}
impl SetHostPowerManagementPolicyRpc {
pub fn new(runtime: Runtime) -> Self {
Self { runtime }
}
pub async fn process(
self,
request: SetHostPowerManagementPolicyRequest,
) -> Result<SetHostPowerManagementPolicyReply> {
let power = self.runtime.power_management_context().await?;
let scheduler = &request.scheduler;
power.set_smt_policy(request.smt_awareness).await?;
power.set_scheduler_policy(scheduler).await?;
Ok(SetHostPowerManagementPolicyReply {})
}
}

View File

@ -1,39 +0,0 @@
use crate::idm::DaemonIdmHandle;
use crate::zlt::ZoneLookupTable;
use anyhow::Result;
use async_stream::try_stream;
use krata::v1::control::{SnoopIdmReply, SnoopIdmRequest};
use std::pin::Pin;
use tokio_stream::Stream;
use tonic::Status;
pub struct SnoopIdmRpc {
idm: DaemonIdmHandle,
zlt: ZoneLookupTable,
}
impl SnoopIdmRpc {
pub fn new(idm: DaemonIdmHandle, zlt: ZoneLookupTable) -> Self {
Self { idm, zlt }
}
pub async fn process(
self,
_request: SnoopIdmRequest,
) -> Result<Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>> {
let mut messages = self.idm.snoop();
let zlt = self.zlt.clone();
let output = try_stream! {
while let Ok(event) = messages.recv().await {
let Some(from_uuid) = zlt.lookup_uuid_by_domid(event.from).await else {
continue;
};
let Some(to_uuid) = zlt.lookup_uuid_by_domid(event.to).await else {
continue;
};
yield SnoopIdmReply { from: from_uuid.to_string(), to: to_uuid.to_string(), packet: Some(event.packet) };
}
};
Ok(Box::pin(output))
}
}

View File

@ -1,82 +0,0 @@
use std::str::FromStr;
use anyhow::{anyhow, Result};
use uuid::Uuid;
use krata::v1::common::{ZoneResourceStatus, ZoneState};
use krata::v1::control::{UpdateZoneResourcesReply, UpdateZoneResourcesRequest};
use kratart::Runtime;
use crate::db::zone::ZoneStore;
pub struct UpdateZoneResourcesRpc {
runtime: Runtime,
zones: ZoneStore,
}
impl UpdateZoneResourcesRpc {
pub fn new(runtime: Runtime, zones: ZoneStore) -> Self {
Self { runtime, zones }
}
pub async fn process(
self,
request: UpdateZoneResourcesRequest,
) -> Result<UpdateZoneResourcesReply> {
let uuid = Uuid::from_str(&request.zone_id)?;
let Some(mut zone) = self.zones.read(uuid).await? else {
return Err(anyhow!("zone not found"));
};
let Some(ref mut status) = zone.status else {
return Err(anyhow!("zone state not available"));
};
if status.state() != ZoneState::Created {
return Err(anyhow!("zone is in an invalid state"));
}
if status.domid == 0 || status.domid == u32::MAX {
return Err(anyhow!("zone domid is invalid"));
}
let mut resources = request.resources.unwrap_or_default();
if resources.target_memory > resources.max_memory {
resources.max_memory = resources.target_memory;
}
if resources.target_cpus < 1 {
resources.target_cpus = 1;
}
let initial_resources = zone
.spec
.clone()
.unwrap_or_default()
.initial_resources
.unwrap_or_default();
if resources.target_cpus > initial_resources.max_cpus {
resources.target_cpus = initial_resources.max_cpus;
}
resources.max_cpus = initial_resources.max_cpus;
self.runtime
.set_memory_resources(
status.domid,
resources.target_memory * 1024 * 1024,
resources.max_memory * 1024 * 1024,
)
.await
.map_err(|error| anyhow!("failed to set memory resources: {}", error))?;
self.runtime
.set_cpu_resources(status.domid, resources.target_cpus)
.await
.map_err(|error| anyhow!("failed to set cpu resources: {}", error))?;
status.resource_status = Some(ZoneResourceStatus {
active_resources: Some(resources),
});
self.zones.update(uuid, zone).await?;
Ok(UpdateZoneResourcesReply {})
}
}

View File

@ -1,31 +0,0 @@
use crate::event::DaemonEventContext;
use anyhow::Result;
use async_stream::try_stream;
use krata::v1::control::{WatchEventsReply, WatchEventsRequest};
use std::pin::Pin;
use tokio_stream::Stream;
use tonic::Status;
pub struct WatchEventsRpc {
events: DaemonEventContext,
}
impl WatchEventsRpc {
pub fn new(events: DaemonEventContext) -> Self {
Self { events }
}
pub async fn process(
self,
_request: WatchEventsRequest,
) -> Result<Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>>
{
let mut events = self.events.subscribe();
let output = try_stream! {
while let Ok(event) = events.recv().await {
yield WatchEventsReply { event: Some(event), };
}
};
Ok(Box::pin(output))
}
}

View File

@ -1,21 +0,0 @@
use anyhow::Result;
use redb::Database;
use std::path::Path;
use std::sync::Arc;
pub mod network;
pub mod zone;
#[derive(Clone)]
pub struct KrataDatabase {
pub database: Arc<Database>,
}
impl KrataDatabase {
pub fn open(path: &Path) -> Result<Self> {
let database = Database::create(path)?;
Ok(KrataDatabase {
database: Arc::new(database),
})
}
}

View File

@ -1,134 +0,0 @@
use crate::db::KrataDatabase;
use advmac::MacAddr6;
use anyhow::Result;
use krata::v1::common::NetworkReservation as ApiNetworkReservation;
use log::error;
use redb::{ReadableTable, TableDefinition};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::net::{Ipv4Addr, Ipv6Addr};
use uuid::Uuid;
const NETWORK_RESERVATION_TABLE: TableDefinition<u128, &[u8]> =
TableDefinition::new("network-reservation");
#[derive(Clone)]
pub struct NetworkReservationStore {
db: KrataDatabase,
}
impl NetworkReservationStore {
pub fn open(db: KrataDatabase) -> Result<Self> {
let write = db.database.begin_write()?;
let _ = write.open_table(NETWORK_RESERVATION_TABLE);
write.commit()?;
Ok(NetworkReservationStore { db })
}
pub async fn read(&self, id: Uuid) -> Result<Option<NetworkReservation>> {
let read = self.db.database.begin_read()?;
let table = read.open_table(NETWORK_RESERVATION_TABLE)?;
let Some(entry) = table.get(id.to_u128_le())? else {
return Ok(None);
};
let bytes = entry.value();
Ok(Some(serde_json::from_slice(bytes)?))
}
pub async fn list(&self) -> Result<HashMap<Uuid, NetworkReservation>> {
enum ListEntry {
Valid(Uuid, NetworkReservation),
Invalid(Uuid),
}
let mut reservations: HashMap<Uuid, NetworkReservation> = HashMap::new();
let corruptions = {
let read = self.db.database.begin_read()?;
let table = read.open_table(NETWORK_RESERVATION_TABLE)?;
table
.iter()?
.flat_map(|result| {
result.map(|(key, value)| {
let uuid = Uuid::from_u128_le(key.value());
match serde_json::from_slice::<NetworkReservation>(value.value()) {
Ok(reservation) => ListEntry::Valid(uuid, reservation),
Err(error) => {
error!(
"found invalid network reservation in database for uuid {}: {}",
uuid, error
);
ListEntry::Invalid(uuid)
}
}
})
})
.filter_map(|entry| match entry {
ListEntry::Valid(uuid, reservation) => {
reservations.insert(uuid, reservation);
None
}
ListEntry::Invalid(uuid) => Some(uuid),
})
.collect::<Vec<Uuid>>()
};
        if !corruptions.is_empty() {
            let write = self.db.database.begin_write()?;
            {
                let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
                for corruption in corruptions {
                    table.remove(corruption.to_u128_le())?;
                }
            }
            // commit so the removal of corrupt entries is persisted
            write.commit()?;
        }
Ok(reservations)
}
pub async fn update(&self, id: Uuid, entry: NetworkReservation) -> Result<()> {
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
let bytes = serde_json::to_vec(&entry)?;
table.insert(id.to_u128_le(), bytes.as_slice())?;
}
write.commit()?;
Ok(())
}
pub async fn remove(&self, id: Uuid) -> Result<()> {
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
table.remove(id.to_u128_le())?;
}
write.commit()?;
Ok(())
}
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct NetworkReservation {
pub uuid: String,
pub ipv4: Ipv4Addr,
pub ipv6: Ipv6Addr,
pub mac: MacAddr6,
pub ipv4_prefix: u8,
pub ipv6_prefix: u8,
pub gateway_ipv4: Ipv4Addr,
pub gateway_ipv6: Ipv6Addr,
pub gateway_mac: MacAddr6,
}
impl From<NetworkReservation> for ApiNetworkReservation {
fn from(val: NetworkReservation) -> Self {
ApiNetworkReservation {
uuid: val.uuid,
ipv4: format!("{}/{}", val.ipv4, val.ipv4_prefix),
ipv6: format!("{}/{}", val.ipv6, val.ipv6_prefix),
mac: val.mac.to_string().to_lowercase().replace('-', ":"),
gateway_ipv4: format!("{}/{}", val.gateway_ipv4, val.ipv4_prefix),
gateway_ipv6: format!("{}/{}", val.gateway_ipv6, val.ipv6_prefix),
gateway_mac: val.gateway_mac.to_string().to_lowercase().replace('-', ":"),
}
}
}

View File

@ -1,78 +0,0 @@
use std::collections::HashMap;
use crate::db::KrataDatabase;
use anyhow::Result;
use krata::v1::common::Zone;
use log::error;
use prost::Message;
use redb::{ReadableTable, TableDefinition};
use uuid::Uuid;
const ZONE_TABLE: TableDefinition<u128, &[u8]> = TableDefinition::new("zone");
#[derive(Clone)]
pub struct ZoneStore {
db: KrataDatabase,
}
impl ZoneStore {
pub fn open(db: KrataDatabase) -> Result<Self> {
let write = db.database.begin_write()?;
let _ = write.open_table(ZONE_TABLE);
write.commit()?;
Ok(ZoneStore { db })
}
pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
let read = self.db.database.begin_read()?;
let table = read.open_table(ZONE_TABLE)?;
let Some(entry) = table.get(id.to_u128_le())? else {
return Ok(None);
};
let bytes = entry.value();
Ok(Some(Zone::decode(bytes)?))
}
pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
let mut zones: HashMap<Uuid, Zone> = HashMap::new();
let read = self.db.database.begin_read()?;
let table = read.open_table(ZONE_TABLE)?;
for result in table.iter()? {
let (key, value) = result?;
let uuid = Uuid::from_u128_le(key.value());
let state = match Zone::decode(value.value()) {
Ok(state) => state,
Err(error) => {
error!(
"found invalid zone state in database for uuid {}: {}",
uuid, error
);
continue;
}
};
zones.insert(uuid, state);
}
Ok(zones)
}
pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(ZONE_TABLE)?;
let bytes = entry.encode_to_vec();
table.insert(id.to_u128_le(), bytes.as_slice())?;
}
write.commit()?;
Ok(())
}
pub async fn remove(&self, id: Uuid) -> Result<()> {
let write = self.db.database.begin_write()?;
{
let mut table = write.open_table(ZONE_TABLE)?;
table.remove(id.to_u128_le())?;
}
write.commit()?;
Ok(())
}
}
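
A minimal usage sketch for the store above; illustration only. The database path is a hypothetical location, and the remaining Zone fields come from the prost-generated Default.

async fn zone_store_roundtrip() -> Result<()> {
    let database = KrataDatabase::open(std::path::Path::new("/tmp/krata-example.db"))?;
    let zones = ZoneStore::open(database)?;
    let id = Uuid::new_v4();
    let zone = Zone {
        id: id.to_string(),
        ..Default::default()
    };
    zones.update(id, zone).await?;
    assert!(zones.read(id).await?.is_some());
    zones.remove(id).await?;
    Ok(())
}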

View File

@ -1,106 +0,0 @@
use std::{collections::HashMap, sync::Arc};
use anyhow::{anyhow, Result};
use log::warn;
use tokio::sync::RwLock;
use uuid::Uuid;
use crate::config::{DaemonConfig, DaemonPciDeviceConfig};
#[derive(Clone)]
pub struct DaemonDeviceState {
pub pci: Option<DaemonPciDeviceConfig>,
pub owner: Option<Uuid>,
}
#[derive(Clone)]
pub struct DaemonDeviceManager {
config: Arc<DaemonConfig>,
devices: Arc<RwLock<HashMap<String, DaemonDeviceState>>>,
}
impl DaemonDeviceManager {
pub fn new(config: Arc<DaemonConfig>) -> Self {
Self {
config,
devices: Arc::new(RwLock::new(HashMap::new())),
}
}
pub async fn claim(&self, device: &str, uuid: Uuid) -> Result<DaemonDeviceState> {
let mut devices = self.devices.write().await;
let Some(state) = devices.get_mut(device) else {
return Err(anyhow!(
"unable to claim unknown device '{}' for zone {}",
device,
uuid
));
};
if let Some(owner) = state.owner {
return Err(anyhow!(
"unable to claim device '{}' for zone {}: already claimed by {}",
device,
uuid,
owner
));
}
state.owner = Some(uuid);
Ok(state.clone())
}
pub async fn release_all(&self, uuid: Uuid) -> Result<()> {
let mut devices = self.devices.write().await;
for state in (*devices).values_mut() {
if state.owner == Some(uuid) {
state.owner = None;
}
}
Ok(())
}
pub async fn release(&self, device: &str, uuid: Uuid) -> Result<()> {
let mut devices = self.devices.write().await;
let Some(state) = devices.get_mut(device) else {
return Ok(());
};
if let Some(owner) = state.owner {
if owner != uuid {
return Ok(());
}
}
state.owner = None;
Ok(())
}
pub async fn update_claims(&self, claims: HashMap<String, Uuid>) -> Result<()> {
let mut devices = self.devices.write().await;
devices.clear();
for (name, pci) in &self.config.pci.devices {
let owner = claims.get(name).cloned();
devices.insert(
name.clone(),
DaemonDeviceState {
owner,
pci: Some(pci.clone()),
},
);
}
for (name, uuid) in &claims {
if !devices.contains_key(name) {
warn!("unknown device '{}' assigned to zone {}", name, uuid);
}
}
Ok(())
}
pub async fn copy(&self) -> Result<HashMap<String, DaemonDeviceState>> {
let devices = self.devices.read().await;
Ok(devices.clone())
}
}
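
A minimal sketch of the claim/release flow above, assuming update_claims has already populated the manager; the device name gpu0 is hypothetical, and the sketch is not part of the file.

async fn claim_example(devices: DaemonDeviceManager, zone: Uuid) -> Result<()> {
    let state = devices.claim("gpu0", zone).await?;
    assert_eq!(state.owner, Some(zone));
    // Another zone attempting to claim "gpu0" now fails until it is released.
    devices.release("gpu0", zone).await?;
    Ok(())
}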

View File

@ -1,185 +0,0 @@
use std::{
collections::{hash_map::Entry, HashMap},
str::FromStr,
time::Duration,
};
use crate::db::zone::ZoneStore;
use crate::idm::DaemonIdmHandle;
use anyhow::Result;
use krata::v1::common::ZoneExitStatus;
use krata::{
idm::{internal::event::Event as EventType, internal::Event},
v1::common::{ZoneState, ZoneStatus},
};
use log::{error, warn};
use tokio::{
select,
sync::{
broadcast,
mpsc::{channel, Receiver, Sender},
},
task::JoinHandle,
time,
};
use uuid::Uuid;
pub type DaemonEvent = krata::v1::control::watch_events_reply::Event;
const EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
const IDM_EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
#[derive(Clone)]
pub struct DaemonEventContext {
sender: broadcast::Sender<DaemonEvent>,
}
impl DaemonEventContext {
pub fn subscribe(&self) -> broadcast::Receiver<DaemonEvent> {
self.sender.subscribe()
}
pub fn send(&self, event: DaemonEvent) -> Result<()> {
let _ = self.sender.send(event);
Ok(())
}
}
pub struct DaemonEventGenerator {
zones: ZoneStore,
zone_reconciler_notify: Sender<Uuid>,
feed: broadcast::Receiver<DaemonEvent>,
idm: DaemonIdmHandle,
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
idm_sender: Sender<(u32, Event)>,
idm_receiver: Receiver<(u32, Event)>,
_event_sender: broadcast::Sender<DaemonEvent>,
}
impl DaemonEventGenerator {
pub async fn new(
zones: ZoneStore,
zone_reconciler_notify: Sender<Uuid>,
idm: DaemonIdmHandle,
) -> Result<(DaemonEventContext, DaemonEventGenerator)> {
let (sender, _) = broadcast::channel(EVENT_CHANNEL_QUEUE_LEN);
let (idm_sender, idm_receiver) = channel(IDM_EVENT_CHANNEL_QUEUE_LEN);
let generator = DaemonEventGenerator {
zones,
zone_reconciler_notify,
feed: sender.subscribe(),
idm,
idms: HashMap::new(),
idm_sender,
idm_receiver,
_event_sender: sender.clone(),
};
let context = DaemonEventContext { sender };
Ok((context, generator))
}
async fn handle_feed_event(&mut self, event: &DaemonEvent) -> Result<()> {
let DaemonEvent::ZoneChanged(changed) = event;
let Some(ref zone) = changed.zone else {
return Ok(());
};
let Some(ref status) = zone.status else {
return Ok(());
};
let state = status.state();
let id = Uuid::from_str(&zone.id)?;
let domid = status.domid;
match state {
ZoneState::Created => {
if let Entry::Vacant(e) = self.idms.entry(domid) {
let client = self.idm.client_by_domid(domid).await?;
let mut receiver = client.subscribe().await?;
let sender = self.idm_sender.clone();
let task = tokio::task::spawn(async move {
loop {
let Ok(event) = receiver.recv().await else {
break;
};
if let Err(error) = sender.send((domid, event)).await {
warn!("unable to deliver idm event: {}", error);
}
}
});
e.insert((id, task));
}
}
ZoneState::Destroyed => {
if let Some((_, handle)) = self.idms.remove(&domid) {
handle.abort();
}
}
_ => {}
}
Ok(())
}
async fn handle_idm_event(&mut self, id: Uuid, event: Event) -> Result<()> {
match event.event {
Some(EventType::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
None => Ok(()),
}
}
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
if let Some(mut zone) = self.zones.read(id).await? {
zone.status = Some(ZoneStatus {
state: ZoneState::Exited.into(),
network_status: zone.status.clone().unwrap_or_default().network_status,
exit_status: Some(ZoneExitStatus { code }),
error_status: None,
resource_status: zone.status.clone().unwrap_or_default().resource_status,
host: zone.status.clone().map(|x| x.host).unwrap_or_default(),
domid: zone.status.clone().map(|x| x.domid).unwrap_or(u32::MAX),
});
self.zones.update(id, zone).await?;
self.zone_reconciler_notify.send(id).await?;
}
Ok(())
}
async fn evaluate(&mut self) -> Result<()> {
select! {
x = self.idm_receiver.recv() => match x {
Some((domid, event)) => {
if let Some((id, _)) = self.idms.get(&domid) {
self.handle_idm_event(*id, event).await?;
}
Ok(())
},
None => {
Ok(())
}
},
x = self.feed.recv() => match x {
Ok(event) => {
self.handle_feed_event(&event).await
},
Err(error) => {
Err(error.into())
}
},
}
}
pub async fn launch(mut self) -> Result<JoinHandle<()>> {
Ok(tokio::task::spawn(async move {
loop {
if let Err(error) = self.evaluate().await {
error!("failed to evaluate daemon events: {}", error);
time::sleep(Duration::from_secs(5)).await;
}
}
}))
}
}
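
A minimal sketch of consuming the broadcast feed exposed by DaemonEventContext; illustration only, assuming a context obtained from DaemonEventGenerator::new and ignoring lagged-receiver errors.

async fn log_zone_changes(events: DaemonEventContext) {
    let mut feed = events.subscribe();
    // ZoneChanged is the only event variant today, as handle_feed_event above assumes.
    while let Ok(DaemonEvent::ZoneChanged(changed)) = feed.recv().await {
        if let Some(zone) = changed.zone {
            log::info!("zone {} changed", zone.id);
        }
    }
}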

View File

@ -1,305 +0,0 @@
use std::{
collections::{hash_map::Entry, HashMap},
sync::Arc,
};
use anyhow::{anyhow, Result};
use bytes::{Buf, BytesMut};
use krata::idm::{
client::{IdmBackend, IdmInternalClient},
internal::INTERNAL_IDM_CHANNEL,
transport::IdmTransportPacket,
};
use kratart::channel::ChannelService;
use log::{debug, error, warn};
use prost::Message;
use tokio::{
select,
sync::{
broadcast,
mpsc::{channel, Receiver, Sender},
Mutex,
},
task::JoinHandle,
};
use uuid::Uuid;
use crate::zlt::ZoneLookupTable;
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
#[derive(Clone)]
pub struct DaemonIdmHandle {
zlt: ZoneLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmTransportPacket)>,
task: Arc<JoinHandle<()>>,
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
}
impl DaemonIdmHandle {
pub fn snoop(&self) -> broadcast::Receiver<DaemonIdmSnoopPacket> {
self.snoop_sender.subscribe()
}
pub async fn client(&self, uuid: Uuid) -> Result<IdmInternalClient> {
let Some(domid) = self.zlt.lookup_domid_by_uuid(&uuid).await else {
return Err(anyhow!("unable to find domain {}", uuid));
};
self.client_by_domid(domid).await
}
pub async fn client_by_domid(&self, domid: u32) -> Result<IdmInternalClient> {
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await
}
}
impl Drop for DaemonIdmHandle {
fn drop(&mut self) {
if Arc::strong_count(&self.task) <= 1 {
self.task.abort();
}
}
}
#[derive(Clone)]
pub struct DaemonIdmSnoopPacket {
pub from: u32,
pub to: u32,
pub packet: IdmTransportPacket,
}
pub struct DaemonIdm {
zlt: ZoneLookupTable,
clients: ClientMap,
feeds: BackendFeedMap,
tx_sender: Sender<(u32, IdmTransportPacket)>,
tx_raw_sender: Sender<(u32, Vec<u8>)>,
tx_receiver: Receiver<(u32, IdmTransportPacket)>,
rx_receiver: Receiver<(u32, Option<Vec<u8>>)>,
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
task: JoinHandle<()>,
}
impl DaemonIdm {
pub async fn new(zlt: ZoneLookupTable) -> Result<DaemonIdm> {
debug!("allocating channel service for idm");
let (service, tx_raw_sender, rx_receiver) =
ChannelService::new("krata-channel".to_string(), None).await?;
let (tx_sender, tx_receiver) = channel(100);
let (snoop_sender, _) = broadcast::channel(100);
debug!("starting idm channel service");
let task = service.launch().await?;
let clients = Arc::new(Mutex::new(HashMap::new()));
let feeds = Arc::new(Mutex::new(HashMap::new()));
Ok(DaemonIdm {
zlt,
rx_receiver,
tx_receiver,
tx_sender,
tx_raw_sender,
snoop_sender,
task,
clients,
feeds,
})
}
pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
let zlt = self.zlt.clone();
let clients = self.clients.clone();
let feeds = self.feeds.clone();
let tx_sender = self.tx_sender.clone();
let snoop_sender = self.snoop_sender.clone();
let task = tokio::task::spawn(async move {
let mut buffers: HashMap<u32, BytesMut> = HashMap::new();
while let Err(error) = self.process(&mut buffers).await {
error!("failed to process idm: {}", error);
}
});
Ok(DaemonIdmHandle {
zlt,
clients,
feeds,
tx_sender,
snoop_sender,
task: Arc::new(task),
})
}
async fn process_rx_packet(
&mut self,
domid: u32,
data: Option<Vec<u8>>,
buffers: &mut HashMap<u32, BytesMut>,
) -> Result<()> {
// check if data is present, if it is not, that signals a closed channel.
if let Some(data) = data {
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
buffer.extend_from_slice(&data);
loop {
// check if the buffer is less than the header size, if so, wait for more data
if buffer.len() < 6 {
break;
}
// check for the magic bytes 0xff, 0xff at the start of the message, if that doesn't
// exist, clear the buffer. this ensures that partial messages won't be processed.
if buffer[0] != 0xff || buffer[1] != 0xff {
buffer.clear();
return Ok(());
}
// read the size from the buffer as a little endian u32
let size = (buffer[2] as u32
| (buffer[3] as u32) << 8
| (buffer[4] as u32) << 16
| (buffer[5] as u32) << 24) as usize;
let needed = size + 6;
if buffer.len() < needed {
return Ok(());
}
let mut packet = buffer.split_to(needed);
// advance the buffer by the header, leaving only the raw data.
packet.advance(6);
match IdmTransportPacket::decode(packet) {
Ok(packet) => {
let _ =
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds)
.await?;
let guard = self.feeds.lock().await;
if let Some(feed) = guard.get(&domid) {
let _ = feed.try_send(packet.clone());
}
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
from: domid,
to: 0,
packet,
});
}
Err(packet) => {
warn!("received invalid packet from domain {}: {}", domid, packet);
}
}
}
} else {
let mut clients = self.clients.lock().await;
let mut feeds = self.feeds.lock().await;
clients.remove(&domid);
feeds.remove(&domid);
}
Ok(())
}
async fn tx_packet(&mut self, domid: u32, packet: IdmTransportPacket) -> Result<()> {
let data = packet.encode_to_vec();
let mut buffer = vec![0u8; 6];
let length = data.len() as u32;
// magic bytes
buffer[0] = 0xff;
buffer[1] = 0xff;
// little endian u32 for message size
        buffer[2] = length as u8;
        buffer[3] = (length >> 8) as u8;
        buffer[4] = (length >> 16) as u8;
        buffer[5] = (length >> 24) as u8;
buffer.extend_from_slice(&data);
self.tx_raw_sender.send((domid, buffer)).await?;
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
from: 0,
to: domid,
packet,
});
Ok(())
}
async fn process(&mut self, buffers: &mut HashMap<u32, BytesMut>) -> Result<()> {
loop {
select! {
x = self.rx_receiver.recv() => match x {
Some((domid, data)) => {
self.process_rx_packet(domid, data, buffers).await?;
},
None => {
break;
}
},
x = self.tx_receiver.recv() => match x {
Some((domid, packet)) => {
self.tx_packet(domid, packet).await?;
},
None => {
break;
}
}
}
}
Ok(())
}
}
impl Drop for DaemonIdm {
fn drop(&mut self) {
self.task.abort();
}
}
async fn client_or_create(
domid: u32,
tx_sender: &Sender<(u32, IdmTransportPacket)>,
clients: &ClientMap,
feeds: &BackendFeedMap,
) -> Result<IdmInternalClient> {
let mut clients = clients.lock().await;
let mut feeds = feeds.lock().await;
match clients.entry(domid) {
Entry::Occupied(entry) => Ok(entry.get().clone()),
Entry::Vacant(entry) => {
let (rx_sender, rx_receiver) = channel(100);
feeds.insert(domid, rx_sender);
let backend = IdmDaemonBackend {
domid,
rx_receiver,
tx_sender: tx_sender.clone(),
};
let client = IdmInternalClient::new(
INTERNAL_IDM_CHANNEL,
Box::new(backend) as Box<dyn IdmBackend>,
)
.await?;
entry.insert(client.clone());
Ok(client)
}
}
}
pub struct IdmDaemonBackend {
domid: u32,
rx_receiver: Receiver<IdmTransportPacket>,
tx_sender: Sender<(u32, IdmTransportPacket)>,
}
#[async_trait::async_trait]
impl IdmBackend for IdmDaemonBackend {
async fn recv(&mut self) -> Result<Vec<IdmTransportPacket>> {
if let Some(packet) = self.rx_receiver.recv().await {
Ok(vec![packet])
} else {
Err(anyhow!("idm receive channel closed"))
}
}
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()> {
self.tx_sender.send((self.domid, packet)).await?;
Ok(())
}
}
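
The wire format handled by process_rx_packet and tx_packet above is two 0xff magic bytes, a little-endian u32 length, and then the protobuf-encoded IdmTransportPacket. A compact framing sketch follows; it is illustrative only and relies on the prost::Message import already present in this file.

fn frame_idm_packet(packet: &IdmTransportPacket) -> Vec<u8> {
    let body = packet.encode_to_vec();
    let mut framed = Vec::with_capacity(body.len() + 6);
    // magic bytes
    framed.extend_from_slice(&[0xff, 0xff]);
    // little-endian u32 message size
    framed.extend_from_slice(&(body.len() as u32).to_le_bytes());
    framed.extend_from_slice(&body);
    framed
}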

View File

@ -1,269 +0,0 @@
use crate::db::network::NetworkReservationStore;
use crate::db::zone::ZoneStore;
use crate::db::KrataDatabase;
use crate::network::assignment::NetworkAssignment;
use anyhow::{anyhow, Result};
use config::DaemonConfig;
use console::{DaemonConsole, DaemonConsoleHandle};
use control::DaemonControlService;
use devices::DaemonDeviceManager;
use event::{DaemonEventContext, DaemonEventGenerator};
use idm::{DaemonIdm, DaemonIdmHandle};
use ipnetwork::{Ipv4Network, Ipv6Network};
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
use kratart::Runtime;
use log::{debug, info};
use reconcile::zone::ZoneReconciler;
use std::path::Path;
use std::time::Duration;
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
use tokio::{
fs,
net::UnixListener,
sync::mpsc::{channel, Sender},
task::JoinHandle,
};
use tokio_stream::wrappers::UnixListenerStream;
use tonic::transport::{Identity, Server, ServerTlsConfig};
use uuid::Uuid;
use zlt::ZoneLookupTable;
pub mod command;
pub mod config;
pub mod console;
pub mod control;
pub mod db;
pub mod devices;
pub mod event;
pub mod idm;
pub mod metrics;
pub mod network;
pub mod oci;
pub mod reconcile;
pub mod zlt;
pub struct Daemon {
store: String,
_config: Arc<DaemonConfig>,
zlt: ZoneLookupTable,
devices: DaemonDeviceManager,
zones: ZoneStore,
network: NetworkAssignment,
events: DaemonEventContext,
zone_reconciler_task: JoinHandle<()>,
zone_reconciler_notify: Sender<Uuid>,
generator_task: JoinHandle<()>,
idm: DaemonIdmHandle,
console: DaemonConsoleHandle,
packer: OciPackerService,
runtime: Runtime,
}
const ZONE_RECONCILER_QUEUE_LEN: usize = 1000;
impl Daemon {
pub async fn new(store: String) -> Result<Self> {
let store_dir = PathBuf::from(store.clone());
debug!("loading configuration");
let mut config_path = store_dir.clone();
config_path.push("config.toml");
let config = DaemonConfig::load(&config_path).await?;
let config = Arc::new(config);
debug!("initializing device manager");
let devices = DaemonDeviceManager::new(config.clone());
debug!("validating image cache directory");
let mut image_cache_dir = store_dir.clone();
image_cache_dir.push("cache");
image_cache_dir.push("image");
fs::create_dir_all(&image_cache_dir).await?;
debug!("loading zone0 uuid");
let mut host_uuid_path = store_dir.clone();
host_uuid_path.push("host.uuid");
let host_uuid = if host_uuid_path.is_file() {
let content = fs::read_to_string(&host_uuid_path).await?;
Uuid::from_str(content.trim()).ok()
} else {
None
};
let host_uuid = if let Some(host_uuid) = host_uuid {
host_uuid
} else {
let generated = Uuid::new_v4();
let mut string = generated.to_string();
string.push('\n');
fs::write(&host_uuid_path, string).await?;
generated
};
debug!("validating zone asset directories");
let initrd_path = detect_zone_path(&store, "initrd")?;
let kernel_path = detect_zone_path(&store, "kernel")?;
let addons_path = detect_zone_path(&store, "addons.squashfs")?;
debug!("initializing caches and hydrating zone state");
let seed = config.oci.seed.clone().map(PathBuf::from);
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
debug!("initializing core runtime");
let runtime = Runtime::new().await?;
let zlt = ZoneLookupTable::new(0, host_uuid);
let db_path = format!("{}/krata.db", store);
let database = KrataDatabase::open(Path::new(&db_path))?;
let zones = ZoneStore::open(database.clone())?;
let (zone_reconciler_notify, zone_reconciler_receiver) =
channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
debug!("starting IDM service");
let idm = DaemonIdm::new(zlt.clone()).await?;
let idm = idm.launch().await?;
debug!("initializing console interfaces");
let console = DaemonConsole::new(zlt.clone()).await?;
let console = console.launch().await?;
let (events, generator) =
DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
.await?;
let runtime_for_reconciler = runtime.dupe().await?;
let ipv4_network = Ipv4Network::from_str(&config.network.ipv4.subnet)?;
let ipv6_network = Ipv6Network::from_str(&config.network.ipv6.subnet)?;
let network_reservation_store = NetworkReservationStore::open(database)?;
let network = NetworkAssignment::new(
host_uuid,
ipv4_network,
ipv6_network,
network_reservation_store,
)
.await?;
debug!("initializing zone reconciler");
let zone_reconciler = ZoneReconciler::new(
devices.clone(),
zlt.clone(),
zones.clone(),
events.clone(),
runtime_for_reconciler,
packer.clone(),
zone_reconciler_notify.clone(),
kernel_path,
initrd_path,
addons_path,
network.clone(),
config.clone(),
)?;
let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
let generator_task = generator.launch().await?;
// TODO: Create a way of abstracting early init tasks in kratad.
// TODO: Make initial power management policy configurable.
let power = runtime.power_management_context().await?;
power.set_smt_policy(true).await?;
power
.set_scheduler_policy("performance".to_string())
.await?;
info!("power management initialized");
info!("krata daemon initialized");
Ok(Self {
store,
_config: config,
zlt,
devices,
zones,
network,
events,
zone_reconciler_task,
zone_reconciler_notify,
generator_task,
idm,
console,
packer,
runtime,
})
}
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
debug!("starting control service");
let control_service = DaemonControlService::new(
self.zlt.clone(),
self.devices.clone(),
self.events.clone(),
self.console.clone(),
self.idm.clone(),
self.zones.clone(),
self.network.clone(),
self.zone_reconciler_notify.clone(),
self.packer.clone(),
self.runtime.clone(),
);
let mut server = Server::builder();
if let ControlDialAddress::Tls {
host: _,
port: _,
insecure,
} = &addr
{
let mut tls_config = ServerTlsConfig::new();
if !insecure {
let certificate_path = format!("{}/tls/daemon.pem", self.store);
let key_path = format!("{}/tls/daemon.key", self.store);
tls_config = tls_config.identity(Identity::from_pem(certificate_path, key_path));
}
server = server.tls_config(tls_config)?;
}
server = server.http2_keepalive_interval(Some(Duration::from_secs(10)));
let server = server.add_service(ControlServiceServer::new(control_service));
info!("listening on address {}", addr);
match addr {
ControlDialAddress::UnixSocket { path } => {
let path = PathBuf::from(path);
if path.exists() {
fs::remove_file(&path).await?;
}
let listener = UnixListener::bind(path)?;
let stream = UnixListenerStream::new(listener);
server.serve_with_incoming(stream).await?;
}
ControlDialAddress::Tcp { host, port } => {
let address = format!("{}:{}", host, port);
server.serve(SocketAddr::from_str(&address)?).await?;
}
ControlDialAddress::Tls {
host,
port,
insecure: _,
} => {
let address = format!("{}:{}", host, port);
server.serve(SocketAddr::from_str(&address)?).await?;
}
}
Ok(())
}
}
impl Drop for Daemon {
fn drop(&mut self) {
self.zone_reconciler_task.abort();
self.generator_task.abort();
}
}
fn detect_zone_path(store: &str, name: &str) -> Result<PathBuf> {
let mut path = PathBuf::from(format!("{}/zone/{}", store, name));
if path.is_file() {
return Ok(path);
}
path = PathBuf::from(format!("/usr/share/krata/zone/{}", name));
if path.is_file() {
return Ok(path);
}
Err(anyhow!("unable to find required zone file: {}", name))
}


@@ -1,27 +0,0 @@
use krata::{
idm::internal::{MetricFormat, MetricNode},
v1::common::{ZoneMetricFormat, ZoneMetricNode},
};
fn idm_metric_format_to_api(format: MetricFormat) -> ZoneMetricFormat {
match format {
MetricFormat::Unknown => ZoneMetricFormat::Unknown,
MetricFormat::Bytes => ZoneMetricFormat::Bytes,
MetricFormat::Integer => ZoneMetricFormat::Integer,
MetricFormat::DurationSeconds => ZoneMetricFormat::DurationSeconds,
}
}
pub fn idm_metric_to_api(node: MetricNode) -> ZoneMetricNode {
let format = node.format();
ZoneMetricNode {
name: node.name,
value: node.value,
format: idm_metric_format_to_api(format).into(),
children: node
.children
.into_iter()
.map(idm_metric_to_api)
.collect::<Vec<_>>(),
}
}


@@ -1,204 +0,0 @@
use advmac::MacAddr6;
use anyhow::{anyhow, Result};
use ipnetwork::{Ipv4Network, Ipv6Network};
use std::{
collections::HashMap,
net::{Ipv4Addr, Ipv6Addr},
sync::Arc,
};
use tokio::sync::RwLock;
use uuid::Uuid;
use crate::db::network::{NetworkReservation, NetworkReservationStore};
#[derive(Default, Clone)]
pub struct NetworkAssignmentState {
pub ipv4: HashMap<Ipv4Addr, NetworkReservation>,
pub ipv6: HashMap<Ipv6Addr, NetworkReservation>,
}
#[derive(Clone)]
pub struct NetworkAssignment {
ipv4_network: Ipv4Network,
ipv6_network: Ipv6Network,
gateway_ipv4: Ipv4Addr,
gateway_ipv6: Ipv6Addr,
gateway_mac: MacAddr6,
store: NetworkReservationStore,
state: Arc<RwLock<NetworkAssignmentState>>,
}
impl NetworkAssignment {
pub async fn new(
host_uuid: Uuid,
ipv4_network: Ipv4Network,
ipv6_network: Ipv6Network,
store: NetworkReservationStore,
) -> Result<Self> {
let mut state = NetworkAssignment::fetch_current_state(&store).await?;
let gateway_reservation = if let Some(reservation) = store.read(Uuid::nil()).await? {
reservation
} else {
NetworkAssignment::allocate(
&mut state,
&store,
Uuid::nil(),
ipv4_network,
ipv6_network,
None,
None,
None,
)
.await?
};
if store.read(host_uuid).await?.is_none() {
let _ = NetworkAssignment::allocate(
&mut state,
&store,
host_uuid,
ipv4_network,
ipv6_network,
Some(gateway_reservation.gateway_ipv4),
Some(gateway_reservation.gateway_ipv6),
Some(gateway_reservation.gateway_mac),
)
.await?;
}
let assignment = NetworkAssignment {
ipv4_network,
ipv6_network,
gateway_ipv4: gateway_reservation.ipv4,
gateway_ipv6: gateway_reservation.ipv6,
gateway_mac: gateway_reservation.mac,
store,
state: Arc::new(RwLock::new(state)),
};
Ok(assignment)
}
async fn fetch_current_state(
store: &NetworkReservationStore,
) -> Result<NetworkAssignmentState> {
let reservations = store.list().await?;
let mut state = NetworkAssignmentState::default();
for reservation in reservations.values() {
state.ipv4.insert(reservation.ipv4, reservation.clone());
state.ipv6.insert(reservation.ipv6, reservation.clone());
}
Ok(state)
}
#[allow(clippy::too_many_arguments)]
async fn allocate(
state: &mut NetworkAssignmentState,
store: &NetworkReservationStore,
uuid: Uuid,
ipv4_network: Ipv4Network,
ipv6_network: Ipv6Network,
gateway_ipv4: Option<Ipv4Addr>,
gateway_ipv6: Option<Ipv6Addr>,
gateway_mac: Option<MacAddr6>,
) -> Result<NetworkReservation> {
let found_ipv4: Option<Ipv4Addr> = ipv4_network
.iter()
.filter(|ip| {
ip.is_private() && !(ip.is_loopback() || ip.is_multicast() || ip.is_broadcast())
})
.filter(|ip| {
let last = ip.octets()[3];
// keep only IPs whose last octet is 1 through 249; .250 and above can have special meaning
(1..250).contains(&last)
})
.find(|ip| !state.ipv4.contains_key(ip));
let found_ipv6: Option<Ipv6Addr> = ipv6_network
.iter()
.filter(|ip| !ip.is_loopback() && !ip.is_multicast())
.filter(|ip| {
let last = ip.octets()[15];
last > 0
})
.find(|ip| !state.ipv6.contains_key(ip));
let Some(ipv4) = found_ipv4 else {
return Err(anyhow!(
"unable to allocate ipv4 address, assigned network is exhausted"
));
};
let Some(ipv6) = found_ipv6 else {
return Err(anyhow!(
"unable to allocate ipv6 address, assigned network is exhausted"
));
};
let mut mac = MacAddr6::random();
mac.set_local(true);
mac.set_multicast(false);
let reservation = NetworkReservation {
uuid: uuid.to_string(),
ipv4,
ipv6,
mac,
ipv4_prefix: ipv4_network.prefix(),
ipv6_prefix: ipv6_network.prefix(),
gateway_ipv4: gateway_ipv4.unwrap_or(ipv4),
gateway_ipv6: gateway_ipv6.unwrap_or(ipv6),
gateway_mac: gateway_mac.unwrap_or(mac),
};
state.ipv4.insert(ipv4, reservation.clone());
state.ipv6.insert(ipv6, reservation.clone());
store.update(uuid, reservation.clone()).await?;
Ok(reservation)
}
pub async fn assign(&self, uuid: Uuid) -> Result<NetworkReservation> {
let mut state = self.state.write().await;
let reservation = NetworkAssignment::allocate(
&mut state,
&self.store,
uuid,
self.ipv4_network,
self.ipv6_network,
Some(self.gateway_ipv4),
Some(self.gateway_ipv6),
Some(self.gateway_mac),
)
.await?;
Ok(reservation)
}
pub async fn recall(&self, uuid: Uuid) -> Result<()> {
let mut state = self.state.write().await;
self.store.remove(uuid).await?;
state
.ipv4
.retain(|_, reservation| reservation.uuid != uuid.to_string());
state
.ipv6
.retain(|_, reservation| reservation.uuid != uuid.to_string());
Ok(())
}
pub async fn retrieve(&self, uuid: Uuid) -> Result<Option<NetworkReservation>> {
self.store.read(uuid).await
}
pub async fn reload(&self) -> Result<()> {
let mut state = self.state.write().await;
let intermediate = NetworkAssignment::fetch_current_state(&self.store).await?;
*state = intermediate;
Ok(())
}
pub async fn read(&self) -> Result<NetworkAssignmentState> {
Ok(self.state.read().await.clone())
}
pub async fn read_reservations(&self) -> Result<HashMap<Uuid, NetworkReservation>> {
self.store.list().await
}
}


@@ -1 +0,0 @@
pub mod assignment;


@@ -1,79 +0,0 @@
use krata::v1::control::{
image_progress_indication::Indication, ImageProgress, ImageProgressIndication,
ImageProgressIndicationBar, ImageProgressIndicationCompleted, ImageProgressIndicationHidden,
ImageProgressIndicationSpinner, ImageProgressLayer, ImageProgressLayerPhase,
ImageProgressPhase,
};
use krataoci::progress::{
OciProgress, OciProgressIndication, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase,
};
fn convert_oci_progress_indication(indication: OciProgressIndication) -> ImageProgressIndication {
ImageProgressIndication {
indication: Some(match indication {
OciProgressIndication::Hidden => Indication::Hidden(ImageProgressIndicationHidden {}),
OciProgressIndication::ProgressBar {
message,
current,
total,
bytes,
} => Indication::Bar(ImageProgressIndicationBar {
message: message.unwrap_or_default(),
current,
total,
is_bytes: bytes,
}),
OciProgressIndication::Spinner { message } => {
Indication::Spinner(ImageProgressIndicationSpinner {
message: message.unwrap_or_default(),
})
}
OciProgressIndication::Completed {
message,
total,
bytes,
} => Indication::Completed(ImageProgressIndicationCompleted {
message: message.unwrap_or_default(),
total: total.unwrap_or(0),
is_bytes: bytes,
}),
}),
}
}
fn convert_oci_layer_progress(layer: OciProgressLayer) -> ImageProgressLayer {
ImageProgressLayer {
id: layer.id,
phase: match layer.phase {
OciProgressLayerPhase::Waiting => ImageProgressLayerPhase::Waiting,
OciProgressLayerPhase::Downloading => ImageProgressLayerPhase::Downloading,
OciProgressLayerPhase::Downloaded => ImageProgressLayerPhase::Downloaded,
OciProgressLayerPhase::Extracting => ImageProgressLayerPhase::Extracting,
OciProgressLayerPhase::Extracted => ImageProgressLayerPhase::Extracted,
}
.into(),
indication: Some(convert_oci_progress_indication(layer.indication)),
}
}
pub fn convert_oci_progress(oci: OciProgress) -> ImageProgress {
ImageProgress {
phase: match oci.phase {
OciProgressPhase::Started => ImageProgressPhase::Started,
OciProgressPhase::Resolving => ImageProgressPhase::Resolving,
OciProgressPhase::Resolved => ImageProgressPhase::Resolved,
OciProgressPhase::ConfigDownload => ImageProgressPhase::ConfigDownload,
OciProgressPhase::LayerDownload => ImageProgressPhase::LayerDownload,
OciProgressPhase::Assemble => ImageProgressPhase::Assemble,
OciProgressPhase::Pack => ImageProgressPhase::Pack,
OciProgressPhase::Complete => ImageProgressPhase::Complete,
}
.into(),
layers: oci
.layers
.into_values()
.map(convert_oci_layer_progress)
.collect::<Vec<_>>(),
indication: Some(convert_oci_progress_indication(oci.indication)),
}
}


@@ -1 +0,0 @@
pub mod zone;


@@ -1,259 +0,0 @@
use anyhow::{anyhow, Result};
use futures::StreamExt;
use krata::launchcfg::LaunchPackedFormat;
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
use krata::v1::common::{ZoneOciImageSpec, ZoneResourceStatus};
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy, ZoneLaunchNetwork};
use kratart::{launch::ZoneLaunchRequest, Runtime};
use log::info;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, Ordering};
use crate::config::{DaemonConfig, DaemonPciDeviceRdmReservePolicy};
use crate::devices::DaemonDeviceManager;
use crate::network::assignment::NetworkAssignment;
use crate::reconcile::zone::network_reservation_to_network_status;
use crate::{reconcile::zone::ZoneReconcilerResult, zlt::ZoneLookupTable};
use krata::v1::common::zone_image_spec::Image;
use tokio::fs::{self, File};
use tokio::io::AsyncReadExt;
use tokio_tar::Archive;
use uuid::Uuid;
pub struct ZoneCreator<'a> {
pub devices: &'a DaemonDeviceManager,
pub kernel_path: &'a Path,
pub initrd_path: &'a Path,
pub addons_path: &'a Path,
pub packer: &'a OciPackerService,
pub network_assignment: &'a NetworkAssignment,
pub zlt: &'a ZoneLookupTable,
pub runtime: &'a Runtime,
pub config: &'a DaemonConfig,
}
impl ZoneCreator<'_> {
pub async fn oci_spec_tar_read_file(
&self,
file: &Path,
oci: &ZoneOciImageSpec,
) -> Result<Vec<u8>> {
if oci.format() != OciImageFormat::Tar {
return Err(anyhow!(
"oci image spec for {} is required to be in tar format",
oci.digest
));
}
let image = self
.packer
.recall(&oci.digest, OciPackedFormat::Tar)
.await?;
let Some(image) = image else {
return Err(anyhow!("image {} was not found in tar format", oci.digest));
};
let mut archive = Archive::new(File::open(&image.path).await?);
let mut entries = archive.entries()?;
while let Some(entry) = entries.next().await {
let mut entry = entry?;
let path = entry.path()?;
if path == file {
let mut buffer = Vec::new();
entry.read_to_end(&mut buffer).await?;
return Ok(buffer);
}
}
Err(anyhow!(
"unable to find file {} in image {}",
file.to_string_lossy(),
oci.digest
))
}
pub async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
let Some(ref mut spec) = zone.spec else {
return Err(anyhow!("zone spec not specified"));
};
let Some(ref image) = spec.image else {
return Err(anyhow!("image spec not provided"));
};
let oci = match image.image {
Some(Image::Oci(ref oci)) => oci,
None => {
return Err(anyhow!("oci spec not specified"));
}
};
let task = spec.task.as_ref().cloned().unwrap_or_default();
let image = self
.packer
.recall(
&oci.digest,
match oci.format() {
OciImageFormat::Unknown => OciPackedFormat::Squashfs,
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
OciImageFormat::Erofs => OciPackedFormat::Erofs,
OciImageFormat::Tar => {
return Err(anyhow!("tar image format is not supported for zones"));
}
},
)
.await?;
let Some(image) = image else {
return Err(anyhow!(
"image {} in the requested format did not exist",
oci.digest
));
};
let kernel = if let Some(ref spec) = spec.kernel {
let Some(Image::Oci(ref oci)) = spec.image else {
return Err(anyhow!("kernel image spec must be an oci image"));
};
self.oci_spec_tar_read_file(&PathBuf::from("kernel/image"), oci)
.await?
} else {
fs::read(&self.kernel_path).await?
};
let initrd = if let Some(ref spec) = spec.initrd {
let Some(Image::Oci(ref oci)) = spec.image else {
return Err(anyhow!("initrd image spec must be an oci image"));
};
self.oci_spec_tar_read_file(&PathBuf::from("krata/initrd"), oci)
.await?
} else {
fs::read(&self.initrd_path).await?
};
let success = AtomicBool::new(false);
let _device_release_guard = scopeguard::guard(
(spec.devices.clone(), self.devices.clone()),
|(devices, manager)| {
if !success.load(Ordering::Acquire) {
tokio::task::spawn(async move {
for device in devices {
let _ = manager.release(&device.name, uuid).await;
}
});
}
},
);
let mut pcis = Vec::new();
for device in &spec.devices {
let state = self.devices.claim(&device.name, uuid).await?;
if let Some(cfg) = state.pci {
for location in cfg.locations {
let pci = PciDevice {
bdf: PciBdf::from_str(&location)?.with_domain(0),
permissive: cfg.permissive,
msi_translate: cfg.msi_translate,
power_management: cfg.power_management,
rdm_reserve_policy: match cfg.rdm_reserve_policy {
DaemonPciDeviceRdmReservePolicy::Strict => PciRdmReservePolicy::Strict,
DaemonPciDeviceRdmReservePolicy::Relaxed => {
PciRdmReservePolicy::Relaxed
}
},
};
pcis.push(pci);
}
} else {
return Err(anyhow!(
"device '{}' isn't a known device type",
device.name
));
}
}
let reservation = self.network_assignment.assign(uuid).await?;
let mut initial_resources = spec.initial_resources.unwrap_or_default();
if initial_resources.target_cpus < 1 {
initial_resources.target_cpus = 1;
}
if initial_resources.target_cpus > initial_resources.max_cpus {
initial_resources.max_cpus = initial_resources.target_cpus;
}
spec.initial_resources = Some(initial_resources);
let kernel_options = spec.kernel_options.clone().unwrap_or_default();
let info = self
.runtime
.launch(ZoneLaunchRequest {
format: match image.format {
OciPackedFormat::Squashfs => LaunchPackedFormat::Squashfs,
OciPackedFormat::Erofs => LaunchPackedFormat::Erofs,
_ => {
return Err(anyhow!(
"oci image is in an invalid format, which isn't compatible with launch"
));
}
},
uuid: Some(uuid),
name: if spec.name.is_empty() {
None
} else {
Some(spec.name.clone())
},
image,
kernel,
initrd,
target_cpus: initial_resources.target_cpus,
max_cpus: initial_resources.max_cpus,
max_memory: initial_resources.max_memory,
target_memory: initial_resources.target_memory,
pcis,
env: task
.environment
.iter()
.map(|x| (x.key.clone(), x.value.clone()))
.collect::<HashMap<_, _>>(),
run: empty_vec_optional(task.command.clone()),
kernel_verbose: kernel_options.verbose,
kernel_cmdline_append: kernel_options.cmdline_append,
addons_image: Some(self.addons_path.to_path_buf()),
network: ZoneLaunchNetwork {
ipv4: reservation.ipv4.to_string(),
ipv4_prefix: reservation.ipv4_prefix,
ipv6: reservation.ipv6.to_string(),
ipv6_prefix: reservation.ipv6_prefix,
gateway_ipv4: reservation.gateway_ipv4.to_string(),
gateway_ipv6: reservation.gateway_ipv6.to_string(),
zone_mac: reservation.mac,
nameservers: self.config.network.nameservers.clone(),
},
})
.await?;
self.zlt.associate(uuid, info.domid).await;
info!("created zone {}", uuid);
zone.status = Some(ZoneStatus {
state: ZoneState::Created.into(),
network_status: Some(network_reservation_to_network_status(&reservation)),
exit_status: None,
error_status: None,
resource_status: Some(ZoneResourceStatus {
active_resources: Some(initial_resources),
}),
host: self.zlt.host_uuid().to_string(),
domid: info.domid,
});
success.store(true, Ordering::Release);
Ok(ZoneReconcilerResult::Changed { rerun: false })
}
}
fn empty_vec_optional<T>(value: Vec<T>) -> Option<Vec<T>> {
if value.is_empty() {
None
} else {
Some(value)
}
}


@@ -1,381 +0,0 @@
use std::{
collections::{hash_map::Entry, HashMap},
path::PathBuf,
sync::Arc,
time::Duration,
};
use self::create::ZoneCreator;
use crate::config::DaemonConfig;
use crate::db::network::NetworkReservation;
use crate::network::assignment::NetworkAssignment;
use crate::{
db::zone::ZoneStore,
devices::DaemonDeviceManager,
event::{DaemonEvent, DaemonEventContext},
zlt::ZoneLookupTable,
};
use anyhow::Result;
use krata::v1::{
common::{Zone, ZoneErrorStatus, ZoneExitStatus, ZoneNetworkStatus, ZoneState, ZoneStatus},
control::ZoneChangedEvent,
};
use krataoci::packer::service::OciPackerService;
use kratart::Runtime;
use log::{error, info, trace, warn};
use tokio::{
select,
sync::{
mpsc::{channel, Receiver, Sender},
RwLock,
},
task::JoinHandle,
time::sleep,
};
use uuid::Uuid;
mod create;
const PARALLEL_LIMIT: u32 = 5;
#[derive(Debug)]
enum ZoneReconcilerResult {
Unchanged,
Changed { rerun: bool },
}
struct ZoneReconcilerEntry {
sender: Sender<()>,
}
#[derive(Clone)]
pub struct ZoneReconciler {
devices: DaemonDeviceManager,
zlt: ZoneLookupTable,
zones: ZoneStore,
events: DaemonEventContext,
runtime: Runtime,
packer: OciPackerService,
kernel_path: PathBuf,
initrd_path: PathBuf,
addons_path: PathBuf,
tasks: Arc<RwLock<HashMap<Uuid, ZoneReconcilerEntry>>>,
zone_reconciler_notify: Sender<Uuid>,
zone_reconcile_lock: Arc<RwLock<()>>,
ip_assignment: NetworkAssignment,
config: Arc<DaemonConfig>,
}
impl ZoneReconciler {
#[allow(clippy::too_many_arguments)]
pub fn new(
devices: DaemonDeviceManager,
zlt: ZoneLookupTable,
zones: ZoneStore,
events: DaemonEventContext,
runtime: Runtime,
packer: OciPackerService,
zone_reconciler_notify: Sender<Uuid>,
kernel_path: PathBuf,
initrd_path: PathBuf,
modules_path: PathBuf,
ip_assignment: NetworkAssignment,
config: Arc<DaemonConfig>,
) -> Result<Self> {
Ok(Self {
devices,
zlt,
zones,
events,
runtime,
packer,
kernel_path,
initrd_path,
addons_path: modules_path,
tasks: Arc::new(RwLock::new(HashMap::new())),
zone_reconciler_notify,
zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
ip_assignment,
config,
})
}
pub async fn launch(self, mut notify: Receiver<Uuid>) -> Result<JoinHandle<()>> {
Ok(tokio::task::spawn(async move {
if let Err(error) = self.reconcile_runtime(true).await {
error!("runtime reconciler failed: {}", error);
}
loop {
select! {
x = notify.recv() => match x {
None => {
break;
},
Some(uuid) => {
if let Err(error) = self.launch_task_if_needed(uuid).await {
error!("failed to start zone reconciler task {}: {}", uuid, error);
}
let map = self.tasks.read().await;
if let Some(entry) = map.get(&uuid) {
if let Err(error) = entry.sender.send(()).await {
error!("failed to notify zone reconciler task {}: {}", uuid, error);
}
}
}
},
_ = sleep(Duration::from_secs(15)) => {
if let Err(error) = self.reconcile_runtime(false).await {
error!("runtime reconciler failed: {}", error);
}
}
}
}
}))
}
pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
let _permit = self.zone_reconcile_lock.write().await;
trace!("reconciling runtime");
let runtime_zones = self.runtime.list().await?;
let stored_zones = self.zones.list().await?;
let non_existent_zones = runtime_zones
.iter()
.filter(|x| !stored_zones.iter().any(|g| *g.0 == x.uuid))
.collect::<Vec<_>>();
for zone in non_existent_zones {
warn!("destroying unknown runtime zone {}", zone.uuid);
if let Err(error) = self.runtime.destroy(zone.uuid).await {
error!(
"failed to destroy unknown runtime zone {}: {}",
zone.uuid, error
);
}
self.zones.remove(zone.uuid).await?;
}
let mut device_claims = HashMap::new();
for (uuid, mut stored_zone) in stored_zones {
let previous_zone = stored_zone.clone();
let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
match runtime_zone {
None => {
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
if status.state() == ZoneState::Created {
status.state = ZoneState::Creating.into();
}
stored_zone.status = Some(status);
}
Some(runtime) => {
self.zlt.associate(uuid, runtime.domid).await;
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
if let Some(code) = runtime.state.exit_code {
status.state = ZoneState::Exited.into();
status.exit_status = Some(ZoneExitStatus { code });
} else {
status.state = ZoneState::Created.into();
}
for device in &stored_zone
.spec
.as_ref()
.cloned()
.unwrap_or_default()
.devices
{
device_claims.insert(device.name.clone(), uuid);
}
if let Some(reservation) = self.ip_assignment.retrieve(uuid).await? {
status.network_status =
Some(network_reservation_to_network_status(&reservation));
}
stored_zone.status = Some(status);
}
}
let changed = stored_zone != previous_zone;
if changed || initial {
self.zones.update(uuid, stored_zone).await?;
let _ = self.zone_reconciler_notify.try_send(uuid);
}
}
self.devices.update_claims(device_claims).await?;
Ok(())
}
pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
let _runtime_reconcile_permit = self.zone_reconcile_lock.read().await;
let Some(mut zone) = self.zones.read(uuid).await? else {
warn!(
"notified of reconcile for zone {} but it didn't exist",
uuid
);
return Ok(false);
};
info!("reconciling zone {}", uuid);
self.events
.send(DaemonEvent::ZoneChanged(ZoneChangedEvent {
zone: Some(zone.clone()),
}))?;
let start_state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
let result = match start_state {
ZoneState::Creating => self.create(uuid, &mut zone).await,
ZoneState::Exited => self.exited(&mut zone).await,
ZoneState::Destroying => self.destroy(uuid, &mut zone).await,
_ => Ok(ZoneReconcilerResult::Unchanged),
};
let result = match result {
Ok(result) => result,
Err(error) => {
zone.status = Some(zone.status.as_mut().cloned().unwrap_or_default());
zone.status.as_mut().unwrap().state = ZoneState::Failed.into();
zone.status.as_mut().unwrap().error_status = Some(ZoneErrorStatus {
message: error.to_string(),
});
warn!("failed to start zone {}: {}", zone.id, error);
ZoneReconcilerResult::Changed { rerun: false }
}
};
info!("reconciled zone {}", uuid);
let state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
let destroyed = state == ZoneState::Destroyed;
let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
zone: Some(zone.clone()),
});
if destroyed {
self.zones.remove(uuid).await?;
let mut map = self.tasks.write().await;
map.remove(&uuid);
} else {
self.zones.update(uuid, zone.clone()).await?;
}
self.events.send(event)?;
rerun
} else {
false
};
Ok(rerun)
}
async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
let starter = ZoneCreator {
devices: &self.devices,
kernel_path: &self.kernel_path,
initrd_path: &self.initrd_path,
addons_path: &self.addons_path,
packer: &self.packer,
network_assignment: &self.ip_assignment,
zlt: &self.zlt,
runtime: &self.runtime,
config: &self.config,
};
starter.create(uuid, zone).await
}
async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
if let Some(ref mut status) = zone.status {
status.set_state(ZoneState::Destroying);
Ok(ZoneReconcilerResult::Changed { rerun: true })
} else {
Ok(ZoneReconcilerResult::Unchanged)
}
}
async fn destroy(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
if let Err(error) = self.runtime.destroy(uuid).await {
trace!("failed to destroy runtime zone {}: {}", uuid, error);
}
let domid = zone.status.as_ref().map(|x| x.domid);
if let Some(domid) = domid {
self.zlt.remove(uuid, domid).await;
}
info!("destroyed zone {}", uuid);
self.ip_assignment.recall(uuid).await?;
zone.status = Some(ZoneStatus {
state: ZoneState::Destroyed.into(),
network_status: None,
exit_status: None,
error_status: None,
resource_status: None,
host: self.zlt.host_uuid().to_string(),
domid: domid.unwrap_or(u32::MAX),
});
self.devices.release_all(uuid).await?;
Ok(ZoneReconcilerResult::Changed { rerun: false })
}
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
let mut map = self.tasks.write().await;
match map.entry(uuid) {
Entry::Occupied(_) => {}
Entry::Vacant(entry) => {
entry.insert(self.launch_task(uuid).await?);
}
}
Ok(())
}
async fn launch_task(&self, uuid: Uuid) -> Result<ZoneReconcilerEntry> {
let this = self.clone();
let (sender, mut receiver) = channel(10);
tokio::task::spawn(async move {
'notify_loop: loop {
if receiver.recv().await.is_none() {
break 'notify_loop;
}
'rerun_loop: loop {
let rerun = match this.reconcile(uuid).await {
Ok(rerun) => rerun,
Err(error) => {
error!("failed to reconcile zone {}: {}", uuid, error);
false
}
};
if rerun {
continue 'rerun_loop;
}
break 'rerun_loop;
}
}
});
Ok(ZoneReconcilerEntry { sender })
}
}
pub fn network_reservation_to_network_status(ip: &NetworkReservation) -> ZoneNetworkStatus {
ZoneNetworkStatus {
zone_ipv4: format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
zone_ipv6: format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
zone_mac: ip.mac.to_string().to_lowercase().replace('-', ":"),
gateway_ipv4: format!("{}/{}", ip.gateway_ipv4, ip.ipv4_prefix),
gateway_ipv6: format!("{}/{}", ip.gateway_ipv6, ip.ipv6_prefix),
gateway_mac: ip.gateway_mac.to_string().to_lowercase().replace('-', ":"),
}
}


@@ -1,69 +0,0 @@
use std::{collections::HashMap, sync::Arc};
use tokio::sync::RwLock;
use uuid::Uuid;
struct ZoneLookupTableState {
domid_to_uuid: HashMap<u32, Uuid>,
uuid_to_domid: HashMap<Uuid, u32>,
}
impl ZoneLookupTableState {
pub fn new(host_uuid: Uuid) -> Self {
let mut domid_to_uuid = HashMap::new();
let mut uuid_to_domid = HashMap::new();
domid_to_uuid.insert(0, host_uuid);
uuid_to_domid.insert(host_uuid, 0);
ZoneLookupTableState {
domid_to_uuid,
uuid_to_domid,
}
}
}
#[derive(Clone)]
pub struct ZoneLookupTable {
host_domid: u32,
host_uuid: Uuid,
state: Arc<RwLock<ZoneLookupTableState>>,
}
impl ZoneLookupTable {
pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
ZoneLookupTable {
host_domid,
host_uuid,
state: Arc::new(RwLock::new(ZoneLookupTableState::new(host_uuid))),
}
}
pub fn host_uuid(&self) -> Uuid {
self.host_uuid
}
pub fn host_domid(&self) -> u32 {
self.host_domid
}
pub async fn lookup_uuid_by_domid(&self, domid: u32) -> Option<Uuid> {
let state = self.state.read().await;
state.domid_to_uuid.get(&domid).cloned()
}
pub async fn lookup_domid_by_uuid(&self, uuid: &Uuid) -> Option<u32> {
let state = self.state.read().await;
state.uuid_to_domid.get(uuid).cloned()
}
pub async fn associate(&self, uuid: Uuid, domid: u32) {
let mut state = self.state.write().await;
state.uuid_to_domid.insert(uuid, domid);
state.domid_to_uuid.insert(domid, uuid);
}
pub async fn remove(&self, uuid: Uuid, domid: u32) {
let mut state = self.state.write().await;
state.uuid_to_domid.remove(&uuid);
state.domid_to_uuid.remove(&domid);
}
}


@@ -1,45 +0,0 @@
[package]
name = "krata"
description = "Client library and common services for the krata isolation engine"
license.workspace = true
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
resolver = "2"
[dependencies]
anyhow = { workspace = true }
async-trait = { workspace = true }
bytes = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
once_cell = { workspace = true }
pin-project-lite = { workspace = true }
prost = { workspace = true }
prost-reflect = { workspace = true }
prost-types = { workspace = true }
scopeguard = { workspace = true }
serde = { workspace = true }
tonic = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
tower = { workspace = true }
url = { workspace = true }
[target.'cfg(unix)'.dependencies]
hyper = { workspace = true }
hyper-util = { workspace = true }
nix = { workspace = true, features = ["term"] }
[build-dependencies]
tonic-build = { workspace = true }
prost-build = { workspace = true }
prost-reflect-build = { workspace = true }
[lib]
name = "krata"
[[example]]
name = "ethtool"
path = "examples/ethtool.rs"


@@ -1,26 +0,0 @@
use std::io::Result;
fn main() -> Result<()> {
let mut config = prost_build::Config::new();
prost_reflect_build::Builder::new()
.descriptor_pool("crate::DESCRIPTOR_POOL")
.configure(
&mut config,
&[
"proto/krata/v1/control.proto",
"proto/krata/idm/transport.proto",
"proto/krata/idm/internal.proto",
],
&["proto/"],
)?;
tonic_build::configure().compile_with_config(
config,
&[
"proto/krata/v1/control.proto",
"proto/krata/idm/transport.proto",
"proto/krata/idm/internal.proto",
],
&["proto/"],
)?;
Ok(())
}


@@ -1,13 +0,0 @@
use std::env;
use anyhow::Result;
use krata::ethtool::EthtoolHandle;
fn main() -> Result<()> {
let args = env::args().collect::<Vec<String>>();
let interface = args.get(1).unwrap();
let mut handle = EthtoolHandle::new()?;
handle.set_gso(interface, false)?;
handle.set_tso(interface, false)?;
Ok(())
}


@@ -1,98 +0,0 @@
syntax = "proto3";
package krata.idm.internal;
option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.internal";
option java_outer_classname = "IdmInternalProto";
import "google/protobuf/struct.proto";
message ExitEvent {
int32 code = 1;
}
message PingRequest {}
message PingResponse {}
message MetricsRequest {}
message MetricsResponse {
MetricNode root = 1;
}
message MetricNode {
string name = 1;
google.protobuf.Value value = 2;
MetricFormat format = 3;
repeated MetricNode children = 4;
}
enum MetricFormat {
METRIC_FORMAT_UNKNOWN = 0;
METRIC_FORMAT_BYTES = 1;
METRIC_FORMAT_INTEGER = 2;
METRIC_FORMAT_DURATION_SECONDS = 3;
}
message ExecEnvVar {
string key = 1;
string value = 2;
}
message ExecStreamRequestStart {
repeated ExecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
bool tty = 4;
ExecStreamRequestTerminalSize terminal_size = 5;
}
message ExecStreamRequestStdin {
bytes data = 1;
bool closed = 2;
}
message ExecStreamRequestTerminalSize {
uint32 rows = 1;
uint32 columns = 2;
}
message ExecStreamRequestUpdate {
oneof update {
ExecStreamRequestStart start = 1;
ExecStreamRequestStdin stdin = 2;
ExecStreamRequestTerminalSize terminal_resize = 3;
}
}
message ExecStreamResponseUpdate {
bool exited = 1;
string error = 2;
int32 exit_code = 3;
bytes stdout = 4;
bytes stderr = 5;
}
message Event {
oneof event {
ExitEvent exit = 1;
}
}
message Request {
oneof request {
PingRequest ping = 1;
MetricsRequest metrics = 2;
ExecStreamRequestUpdate exec_stream = 3;
}
}
message Response {
oneof response {
PingResponse ping = 1;
MetricsResponse metrics = 2;
ExecStreamResponseUpdate exec_stream = 3;
}
}


@@ -1,27 +0,0 @@
syntax = "proto3";
package krata.idm.transport;
option java_multiple_files = true;
option java_package = "dev.krata.proto.idm.transport";
option java_outer_classname = "IdmTransportProto";
message IdmTransportPacket {
uint64 id = 1;
uint64 channel = 2;
IdmTransportPacketForm form = 3;
bytes data = 4;
}
enum IdmTransportPacketForm {
IDM_TRANSPORT_PACKET_FORM_UNKNOWN = 0;
IDM_TRANSPORT_PACKET_FORM_RAW = 1;
IDM_TRANSPORT_PACKET_FORM_EVENT = 2;
IDM_TRANSPORT_PACKET_FORM_REQUEST = 3;
IDM_TRANSPORT_PACKET_FORM_RESPONSE = 4;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST = 5;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_UPDATE = 6;
IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_UPDATE = 7;
IDM_TRANSPORT_PACKET_FORM_STREAM_REQUEST_CLOSED = 8;
IDM_TRANSPORT_PACKET_FORM_STREAM_RESPONSE_CLOSED = 9;
}


@@ -1,151 +0,0 @@
syntax = "proto3";
package krata.v1.common;
option java_multiple_files = true;
option java_package = "dev.krata.proto.v1.common";
option java_outer_classname = "CommonProto";
import "google/protobuf/struct.proto";
message Zone {
string id = 1;
ZoneSpec spec = 2;
ZoneStatus status = 3;
}
message ZoneSpec {
string name = 1;
ZoneImageSpec image = 2;
// If not specified, defaults to the daemon default kernel.
ZoneImageSpec kernel = 3;
// If not specified, defaults to the daemon default initrd.
ZoneImageSpec initrd = 4;
ZoneResourceSpec initial_resources = 5;
ZoneTaskSpec task = 6;
repeated ZoneSpecAnnotation annotations = 7;
repeated ZoneSpecDevice devices = 8;
ZoneKernelOptionsSpec kernel_options = 9;
}
message ZoneResourceSpec {
uint64 max_memory = 1;
uint64 target_memory = 2;
uint32 max_cpus = 3;
uint32 target_cpus = 4;
}
message ZoneImageSpec {
oneof image {
ZoneOciImageSpec oci = 1;
}
}
message ZoneKernelOptionsSpec {
bool verbose = 1;
string cmdline_append = 2;
}
enum OciImageFormat {
OCI_IMAGE_FORMAT_UNKNOWN = 0;
OCI_IMAGE_FORMAT_SQUASHFS = 1;
OCI_IMAGE_FORMAT_EROFS = 2;
// Tar format is not launchable, and is intended for kernel images.
OCI_IMAGE_FORMAT_TAR = 3;
}
message ZoneOciImageSpec {
string digest = 1;
OciImageFormat format = 2;
}
message ZoneTaskSpec {
repeated ZoneTaskSpecEnvVar environment = 1;
repeated string command = 2;
string working_directory = 3;
bool tty = 4;
}
message ZoneTaskSpecEnvVar {
string key = 1;
string value = 2;
}
message ZoneSpecAnnotation {
string key = 1;
string value = 2;
}
message ZoneSpecDevice {
string name = 1;
}
message ZoneStatus {
ZoneState state = 1;
ZoneNetworkStatus network_status = 2;
ZoneExitStatus exit_status = 3;
ZoneErrorStatus error_status = 4;
string host = 5;
uint32 domid = 6;
ZoneResourceStatus resource_status = 7;
}
enum ZoneState {
ZONE_STATE_UNKNOWN = 0;
ZONE_STATE_CREATING = 1;
ZONE_STATE_CREATED = 2;
ZONE_STATE_EXITED = 3;
ZONE_STATE_DESTROYING = 4;
ZONE_STATE_DESTROYED = 5;
ZONE_STATE_FAILED = 6;
}
message ZoneNetworkStatus {
string zone_ipv4 = 1;
string zone_ipv6 = 2;
string zone_mac = 3;
string gateway_ipv4 = 4;
string gateway_ipv6 = 5;
string gateway_mac = 6;
}
message ZoneExitStatus {
int32 code = 1;
}
message ZoneErrorStatus {
string message = 1;
}
message ZoneResourceStatus {
ZoneResourceSpec active_resources = 1;
}
message ZoneMetricNode {
string name = 1;
google.protobuf.Value value = 2;
ZoneMetricFormat format = 3;
repeated ZoneMetricNode children = 4;
}
enum ZoneMetricFormat {
ZONE_METRIC_FORMAT_UNKNOWN = 0;
ZONE_METRIC_FORMAT_BYTES = 1;
ZONE_METRIC_FORMAT_INTEGER = 2;
ZONE_METRIC_FORMAT_DURATION_SECONDS = 3;
}
message TerminalSize {
uint32 rows = 1;
uint32 columns = 2;
}
message NetworkReservation {
string uuid = 1;
string ipv4 = 2;
string ipv6 = 3;
string mac = 4;
string gateway_ipv4 = 5;
string gateway_ipv6 = 6;
string gateway_mac = 7;
}


@@ -1,275 +0,0 @@
syntax = "proto3";
package krata.v1.control;
option java_multiple_files = true;
option java_package = "dev.krata.proto.v1.control";
option java_outer_classname = "ControlProto";
import "krata/idm/transport.proto";
import "krata/v1/common.proto";
service ControlService {
rpc GetHostStatus(GetHostStatusRequest) returns (GetHostStatusReply);
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
rpc GetHostCpuTopology(GetHostCpuTopologyRequest) returns (GetHostCpuTopologyReply);
rpc SetHostPowerManagementPolicy(SetHostPowerManagementPolicyRequest) returns (SetHostPowerManagementPolicyReply);
rpc ListDevices(ListDevicesRequest) returns (ListDevicesReply);
rpc ListNetworkReservations(ListNetworkReservationsRequest) returns (ListNetworkReservationsReply);
rpc PullImage(PullImageRequest) returns (stream PullImageReply);
rpc CreateZone(CreateZoneRequest) returns (CreateZoneReply);
rpc DestroyZone(DestroyZoneRequest) returns (DestroyZoneReply);
rpc ResolveZoneId(ResolveZoneIdRequest) returns (ResolveZoneIdReply);
rpc GetZone(GetZoneRequest) returns (GetZoneReply);
rpc UpdateZoneResources(UpdateZoneResourcesRequest) returns (UpdateZoneResourcesReply);
rpc ListZones(ListZonesRequest) returns (ListZonesReply);
rpc AttachZoneConsole(stream ZoneConsoleRequest) returns (stream ZoneConsoleReply);
rpc ExecInsideZone(stream ExecInsideZoneRequest) returns (stream ExecInsideZoneReply);
rpc ReadZoneMetrics(ReadZoneMetricsRequest) returns (ReadZoneMetricsReply);
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
rpc ReadHypervisorConsole(ReadHypervisorConsoleRequest) returns (ReadHypervisorConsoleReply);
}
message GetHostStatusRequest {}
message GetHostStatusReply {
string host_uuid = 1;
uint32 host_domid = 2;
string krata_version = 3;
string host_ipv4 = 4;
string host_ipv6 = 5;
string host_mac = 6;
}
message CreateZoneRequest {
krata.v1.common.ZoneSpec spec = 1;
}
message CreateZoneReply {
string zone_id = 1;
}
message DestroyZoneRequest {
string zone_id = 1;
}
message DestroyZoneReply {}
message ResolveZoneIdRequest {
string name = 1;
}
message ResolveZoneIdReply {
string zone_id = 1;
}
message GetZoneRequest {
string zone_id = 1;
}
message GetZoneReply {
krata.v1.common.Zone zone = 1;
}
message ListZonesRequest {}
message ListZonesReply {
repeated krata.v1.common.Zone zones = 1;
}
message ExecInsideZoneRequest {
string zone_id = 1;
krata.v1.common.ZoneTaskSpec task = 2;
bytes stdin = 3;
bool stdin_closed = 4;
krata.v1.common.TerminalSize terminal_size = 5;
}
message ExecInsideZoneReply {
bool exited = 1;
string error = 2;
int32 exit_code = 3;
bytes stdout = 4;
bytes stderr = 5;
}
message ZoneConsoleRequest {
string zone_id = 1;
bytes data = 2;
bool replay_history = 3;
}
message ZoneConsoleReply {
bytes data = 1;
}
message WatchEventsRequest {}
message WatchEventsReply {
oneof event {
ZoneChangedEvent zone_changed = 1;
}
}
message ZoneChangedEvent {
krata.v1.common.Zone zone = 1;
}
message ReadZoneMetricsRequest {
string zone_id = 1;
}
message ReadZoneMetricsReply {
krata.v1.common.ZoneMetricNode root = 1;
}
message SnoopIdmRequest {}
message SnoopIdmReply {
string from = 1;
string to = 2;
krata.idm.transport.IdmTransportPacket packet = 3;
}
message ImageProgress {
ImageProgressPhase phase = 1;
repeated ImageProgressLayer layers = 2;
ImageProgressIndication indication = 3;
}
enum ImageProgressPhase {
IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
IMAGE_PROGRESS_PHASE_STARTED = 1;
IMAGE_PROGRESS_PHASE_RESOLVING = 2;
IMAGE_PROGRESS_PHASE_RESOLVED = 3;
IMAGE_PROGRESS_PHASE_CONFIG_DOWNLOAD = 4;
IMAGE_PROGRESS_PHASE_LAYER_DOWNLOAD = 5;
IMAGE_PROGRESS_PHASE_ASSEMBLE = 6;
IMAGE_PROGRESS_PHASE_PACK = 7;
IMAGE_PROGRESS_PHASE_COMPLETE = 8;
}
message ImageProgressLayer {
string id = 1;
ImageProgressLayerPhase phase = 2;
ImageProgressIndication indication = 3;
}
enum ImageProgressLayerPhase {
IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
}
message ImageProgressIndication {
oneof indication {
ImageProgressIndicationBar bar = 1;
ImageProgressIndicationSpinner spinner = 2;
ImageProgressIndicationHidden hidden = 3;
ImageProgressIndicationCompleted completed = 4;
}
}
message ImageProgressIndicationBar {
string message = 1;
uint64 current = 2;
uint64 total = 3;
bool is_bytes = 4;
}
message ImageProgressIndicationSpinner {
string message = 1;
}
message ImageProgressIndicationHidden {}
message ImageProgressIndicationCompleted {
string message = 1;
uint64 total = 2;
bool is_bytes = 3;
}
message PullImageRequest {
string image = 1;
krata.v1.common.OciImageFormat format = 2;
bool overwrite_cache = 3;
bool update = 4;
}
message PullImageReply {
ImageProgress progress = 1;
string digest = 2;
krata.v1.common.OciImageFormat format = 3;
}
message DeviceInfo {
string name = 1;
bool claimed = 2;
string owner = 3;
}
message ListDevicesRequest {}
message ListDevicesReply {
repeated DeviceInfo devices = 1;
}
enum HostCpuTopologyClass {
HOST_CPU_TOPOLOGY_CLASS_STANDARD = 0;
HOST_CPU_TOPOLOGY_CLASS_PERFORMANCE = 1;
HOST_CPU_TOPOLOGY_CLASS_EFFICIENCY = 2;
}
message HostCpuTopologyInfo {
uint32 core = 1;
uint32 socket = 2;
uint32 node = 3;
uint32 thread = 4;
HostCpuTopologyClass class = 5;
}
message GetHostCpuTopologyRequest {}
message GetHostCpuTopologyReply {
repeated HostCpuTopologyInfo cpus = 1;
}
message SetHostPowerManagementPolicyRequest {
string scheduler = 1;
bool smt_awareness = 2;
}
message SetHostPowerManagementPolicyReply {}
message UpdateZoneResourcesRequest {
string zone_id = 1;
krata.v1.common.ZoneResourceSpec resources = 2;
}
message UpdateZoneResourcesReply {}
message ReadHypervisorConsoleRequest {}
message ReadHypervisorConsoleReply {
string data = 1;
}
message ListNetworkReservationsRequest {}
message ListNetworkReservationsReply {
repeated krata.v1.common.NetworkReservation reservations = 1;
}


@@ -1,54 +0,0 @@
#[cfg(unix)]
use crate::unix::HyperUnixConnector;
use crate::{dial::ControlDialAddress, v1::control::control_service_client::ControlServiceClient};
#[cfg(not(unix))]
use anyhow::anyhow;
use anyhow::Result;
use tonic::transport::{Channel, ClientTlsConfig, Endpoint};
pub struct ControlClientProvider {}
impl ControlClientProvider {
pub async fn dial(addr: ControlDialAddress) -> Result<ControlServiceClient<Channel>> {
let channel = match addr {
ControlDialAddress::UnixSocket { path } => {
#[cfg(not(unix))]
return Err(anyhow!(
"unix sockets are not supported on this platform (path {})",
path
));
#[cfg(unix)]
ControlClientProvider::dial_unix_socket(path).await?
}
ControlDialAddress::Tcp { host, port } => {
Endpoint::try_from(format!("http://{}:{}", host, port))?
.connect()
.await?
}
ControlDialAddress::Tls {
host,
port,
insecure: _,
} => {
let tls_config = ClientTlsConfig::new().domain_name(&host);
let address = format!("https://{}:{}", host, port);
Channel::from_shared(address)?
.tls_config(tls_config)?
.connect()
.await?
}
};
Ok(ControlServiceClient::new(channel))
}
#[cfg(unix)]
async fn dial_unix_socket(path: String) -> Result<Channel> {
// This URL is not actually used but is required to be specified.
Ok(Endpoint::try_from(format!("unix://localhost/{}", path))?
.connect_with_connector(HyperUnixConnector {})
.await?)
}
}
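For context, a minimal sketch of driving this provider together with the tonic-generated client from control.proto; the krata::client re-export path, the socket path, and the #[tokio::main] runtime setup are assumptions made for illustration:

use std::str::FromStr;

use anyhow::Result;
use krata::client::ControlClientProvider; // assumed re-export path for this provider
use krata::dial::ControlDialAddress;
use krata::v1::control::ListZonesRequest;

#[tokio::main]
async fn main() -> Result<()> {
    // Dial the daemon over its unix socket; TCP and TLS addresses take the same code path.
    let addr = ControlDialAddress::from_str("unix:///var/lib/krata/daemon.socket")?;
    let mut client = ControlClientProvider::dial(addr).await?;
    // ListZones is a plain unary RPC; the reply carries the full Zone records.
    let reply = client.list_zones(ListZonesRequest {}).await?.into_inner();
    for zone in reply.zones {
        println!("{} -> {:?}", zone.id, zone.status.map(|status| status.state()));
    }
    Ok(())
}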


@@ -1,100 +0,0 @@
use std::{fmt::Display, str::FromStr};
use anyhow::anyhow;
use url::{Host, Url};
pub const KRATA_DEFAULT_TCP_PORT: u16 = 4350;
pub const KRATA_DEFAULT_TLS_PORT: u16 = 4353;
#[derive(Clone)]
pub enum ControlDialAddress {
UnixSocket {
path: String,
},
Tcp {
host: String,
port: u16,
},
Tls {
host: String,
port: u16,
insecure: bool,
},
}
impl FromStr for ControlDialAddress {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let url: Url = s.parse()?;
let host = url.host().unwrap_or(Host::Domain("localhost")).to_string();
match url.scheme() {
"unix" => Ok(ControlDialAddress::UnixSocket {
path: url.path().to_string(),
}),
"tcp" => {
let port = url.port().unwrap_or(KRATA_DEFAULT_TCP_PORT);
Ok(ControlDialAddress::Tcp { host, port })
}
"tls" | "tls-insecure" => {
let insecure = url.scheme() == "tls-insecure";
let port = url.port().unwrap_or(KRATA_DEFAULT_TLS_PORT);
Ok(ControlDialAddress::Tls {
host,
port,
insecure,
})
}
_ => Err(anyhow!("unknown control address scheme: {}", url.scheme())),
}
}
}
impl From<ControlDialAddress> for Url {
fn from(val: ControlDialAddress) -> Self {
match val {
ControlDialAddress::UnixSocket { path } => {
let mut url = Url::parse("unix:///").unwrap();
url.set_path(&path);
url
}
ControlDialAddress::Tcp { host, port } => {
let mut url = Url::parse("tcp://").unwrap();
url.set_host(Some(&host)).unwrap();
if port != KRATA_DEFAULT_TCP_PORT {
url.set_port(Some(port)).unwrap();
}
url
}
ControlDialAddress::Tls {
host,
port,
insecure,
} => {
let mut url = Url::parse("tls://").unwrap();
if insecure {
url.set_scheme("tls-insecure").unwrap();
}
url.set_host(Some(&host)).unwrap();
if port != KRATA_DEFAULT_TLS_PORT {
url.set_port(Some(port)).unwrap();
}
url
}
}
}
}
impl Display for ControlDialAddress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let url: Url = self.clone().into();
write!(f, "{}", url)
}
}
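For context, a minimal sketch of how the scheme and default-port rules above behave in practice; the host and socket values are illustrative, while the krata::dial path matches the imports used elsewhere in this changeset:

use std::str::FromStr;

use krata::dial::ControlDialAddress;

fn main() -> anyhow::Result<()> {
    // "tcp" URLs with no explicit port fall back to KRATA_DEFAULT_TCP_PORT (4350).
    let tcp = ControlDialAddress::from_str("tcp://10.0.0.5")?;
    // "tls-insecure" selects the Tls variant with insecure = true and the 4353 default.
    let tls = ControlDialAddress::from_str("tls-insecure://control.example.internal")?;
    // Unix socket addresses keep only the path component of the URL.
    let unix = ControlDialAddress::from_str("unix:///var/lib/krata/daemon.socket")?;
    // Display converts back through Url, re-adding a port only when it is non-default.
    println!("{tcp}\n{tls}\n{unix}");
    Ok(())
}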


@@ -1,81 +0,0 @@
use std::{
os::fd::{AsRawFd, FromRawFd, OwnedFd},
ptr::addr_of_mut,
};
use anyhow::Result;
use libc::{ioctl, socket, AF_INET, SOCK_DGRAM};
#[repr(C)]
struct EthtoolValue {
cmd: u32,
data: u32,
}
const ETHTOOL_SGSO: u32 = 0x00000024;
const ETHTOOL_STSO: u32 = 0x0000001f;
#[cfg(not(target_env = "musl"))]
const SIOCETHTOOL: libc::c_ulong = libc::SIOCETHTOOL;
#[cfg(target_env = "musl")]
const SIOCETHTOOL: libc::c_int = libc::SIOCETHTOOL as i32;
#[repr(C)]
#[derive(Debug)]
struct EthtoolIfreq {
ifr_name: [libc::c_char; libc::IF_NAMESIZE],
ifr_data: libc::uintptr_t,
}
impl EthtoolIfreq {
fn new(interface: &str) -> EthtoolIfreq {
let mut ifreq = EthtoolIfreq {
ifr_name: [0; libc::IF_NAMESIZE],
ifr_data: 0,
};
for (i, byte) in interface.as_bytes().iter().enumerate() {
ifreq.ifr_name[i] = *byte as libc::c_char
}
ifreq
}
fn set_value(&mut self, ptr: *mut libc::c_void) {
self.ifr_data = ptr as libc::uintptr_t;
}
}
pub struct EthtoolHandle {
fd: OwnedFd,
}
impl EthtoolHandle {
pub fn new() -> Result<EthtoolHandle> {
let fd = unsafe { socket(AF_INET, SOCK_DGRAM, 0) };
if fd == -1 {
return Err(std::io::Error::last_os_error().into());
}
Ok(EthtoolHandle {
fd: unsafe { OwnedFd::from_raw_fd(fd) },
})
}
pub fn set_gso(&mut self, interface: &str, value: bool) -> Result<()> {
self.set_value(interface, ETHTOOL_SGSO, if value { 1 } else { 0 })
}
pub fn set_tso(&mut self, interface: &str, value: bool) -> Result<()> {
self.set_value(interface, ETHTOOL_STSO, if value { 1 } else { 0 })
}
fn set_value(&mut self, interface: &str, cmd: u32, value: u32) -> Result<()> {
let mut ifreq = EthtoolIfreq::new(interface);
let mut value = EthtoolValue { cmd, data: value };
ifreq.set_value(addr_of_mut!(value) as *mut libc::c_void);
let result = unsafe { ioctl(self.fd.as_raw_fd(), SIOCETHTOOL, addr_of_mut!(ifreq) as u64) };
if result == -1 {
return Err(std::io::Error::last_os_error().into());
}
Ok(())
}
}


@@ -1,87 +0,0 @@
use std::{sync::Arc, time::Duration};
use crate::v1::control::{
control_service_client::ControlServiceClient, watch_events_reply::Event, WatchEventsReply,
WatchEventsRequest,
};
use anyhow::Result;
use log::{error, trace, warn};
use tokio::{sync::broadcast, task::JoinHandle, time::sleep};
use tokio_stream::StreamExt;
use tonic::{transport::Channel, Streaming};
#[derive(Clone)]
pub struct EventStream {
sender: Arc<broadcast::Sender<Event>>,
task: Arc<JoinHandle<()>>,
}
impl EventStream {
pub async fn open(client: ControlServiceClient<Channel>) -> Result<Self> {
let (sender, _) = broadcast::channel(1000);
let emit = sender.clone();
let task = tokio::task::spawn(async move {
if let Err(error) = EventStream::process(client, emit).await {
error!("failed to process event stream: {}", error);
}
});
Ok(Self {
sender: Arc::new(sender),
task: Arc::new(task),
})
}
async fn process(
mut client: ControlServiceClient<Channel>,
emit: broadcast::Sender<Event>,
) -> Result<()> {
let mut events: Option<Streaming<WatchEventsReply>> = None;
loop {
let mut stream = match events {
Some(stream) => stream,
None => {
let result = client.watch_events(WatchEventsRequest {}).await;
if let Err(error) = result {
warn!("failed to watch events: {}", error);
sleep(Duration::from_secs(1)).await;
continue;
}
result.unwrap().into_inner()
}
};
let Some(result) = stream.next().await else {
events = None;
continue;
};
let reply = match result {
Ok(reply) => reply,
Err(error) => {
trace!("event stream processing failed: {}", error);
events = None;
continue;
}
};
let Some(event) = reply.event else {
events = Some(stream);
continue;
};
let _ = emit.send(event);
events = Some(stream);
}
}
pub fn subscribe(&self) -> broadcast::Receiver<Event> {
self.sender.subscribe()
}
}
impl Drop for EventStream {
fn drop(&mut self) {
if Arc::strong_count(&self.task) <= 1 {
self.task.abort();
}
}
}
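For context, a hedged sketch of consuming daemon events through this stream; the krata::client and krata::events module paths and the socket path are assumptions, while watch_events_reply::Event follows the import at the top of this file:

use std::str::FromStr;

use krata::client::ControlClientProvider; // assumed re-export path
use krata::dial::ControlDialAddress;
use krata::events::EventStream; // assumed module name for this file
use krata::v1::control::watch_events_reply::Event;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let addr = ControlDialAddress::from_str("unix:///var/lib/krata/daemon.socket")?;
    let client = ControlClientProvider::dial(addr).await?;
    // open() spawns the background task that keeps the WatchEvents stream connected.
    let events = EventStream::open(client).await?;
    // Every subscriber gets an independent broadcast receiver fed by that task.
    let mut subscription = events.subscribe();
    while let Ok(Event::ZoneChanged(changed)) = subscription.recv().await {
        if let Some(zone) = changed.zone {
            println!("zone {} changed", zone.id);
        }
    }
    Ok(())
}

Because the task handle is reference-counted, dropping the last EventStream clone aborts the background watcher, as the Drop impl above shows.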

Some files were not shown because too many files have changed in this diff.