mirror of
https://github.com/edera-dev/krata.git
synced 2025-08-03 13:11:31 +00:00
Compare commits
50 Commits
Author | SHA1 | Date | |
---|---|---|---|
3cb0e214e9 | |||
0e0c5264eb | |||
19f35ef20a | |||
79e27256e6 | |||
b6c726e7aa | |||
0d2b7a3ae3 | |||
f1e3d59b6a | |||
0106b85de9 | |||
96ccbd50bb | |||
41aa1aa707 | |||
ec74bc8d2b | |||
694de5d1fd | |||
f2db826ba6 | |||
7f5609a846 | |||
adb7b29354 | |||
bd448ee8d9 | |||
1647a07226 | |||
151b43eeec | |||
1123a1a50a | |||
6a6b5b6e0b | |||
274136825a | |||
2ab2cda937 | |||
2519d76479 | |||
dbeb8bf43b | |||
6093627bdd | |||
1d75dfb88a | |||
18bf370f74 | |||
506d2ccf46 | |||
6096dee2fe | |||
bf3b73bf24 | |||
87530edf70 | |||
1dca770091 | |||
01a94ad23e | |||
2a107a370f | |||
313d3f72a5 | |||
5ec3d9d5c1 | |||
1cf03a460e | |||
ffc9dcc0ea | |||
0358c9c775 | |||
dcffaf110e | |||
b81ae5d01a | |||
1756bc6647 | |||
6bf3741ec9 | |||
b7d41ee9f4 | |||
53059e8cca | |||
11bb99b1e4 | |||
eaa84089ce | |||
680244fc5e | |||
d469da4d9b | |||
99091df3cf |
17
.github/workflows/check.yml
vendored
17
.github/workflows/check.yml
vendored
@ -12,7 +12,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -26,15 +26,14 @@ jobs:
|
|||||||
rustup component add rustfmt
|
rustup component add rustfmt
|
||||||
- name: install linux dependencies
|
- name: install linux dependencies
|
||||||
run: ./hack/ci/install-linux-deps.sh
|
run: ./hack/ci/install-linux-deps.sh
|
||||||
# Temporarily ignored: https://github.com/edera-dev/krata/issues/206
|
|
||||||
- name: cargo fmt
|
- name: cargo fmt
|
||||||
run: ./hack/build/cargo.sh fmt --all -- --check || true
|
run: ./hack/build/cargo.sh fmt --all -- --check
|
||||||
shellcheck:
|
shellcheck:
|
||||||
name: shellcheck
|
name: shellcheck
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -56,7 +55,7 @@ jobs:
|
|||||||
name: full build linux-${{ matrix.arch }}
|
name: full build linux-${{ matrix.arch }}
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -84,7 +83,7 @@ jobs:
|
|||||||
name: full test linux-${{ matrix.arch }}
|
name: full test linux-${{ matrix.arch }}
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -111,7 +110,7 @@ jobs:
|
|||||||
name: full clippy linux-${{ matrix.arch }}
|
name: full clippy linux-${{ matrix.arch }}
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -139,7 +138,7 @@ jobs:
|
|||||||
name: zone initrd linux-${{ matrix.arch }}
|
name: zone initrd linux-${{ matrix.arch }}
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -176,7 +175,7 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: configure git line endings
|
- name: configure git line endings
|
||||||
|
20
.github/workflows/nightly.yml
vendored
20
.github/workflows/nightly.yml
vendored
@ -20,7 +20,7 @@ jobs:
|
|||||||
name: nightly full build linux-${{ matrix.arch }}
|
name: nightly full build linux-${{ matrix.arch }}
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -37,7 +37,7 @@ jobs:
|
|||||||
- name: build systemd bundle
|
- name: build systemd bundle
|
||||||
run: ./hack/dist/bundle.sh
|
run: ./hack/dist/bundle.sh
|
||||||
- name: upload systemd bundle
|
- name: upload systemd bundle
|
||||||
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
|
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||||
with:
|
with:
|
||||||
name: krata-bundle-systemd-${{ matrix.arch }}
|
name: krata-bundle-systemd-${{ matrix.arch }}
|
||||||
path: "target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
|
path: "target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
|
||||||
@ -45,7 +45,7 @@ jobs:
|
|||||||
- name: build deb package
|
- name: build deb package
|
||||||
run: ./hack/dist/deb.sh
|
run: ./hack/dist/deb.sh
|
||||||
- name: upload deb package
|
- name: upload deb package
|
||||||
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
|
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||||
with:
|
with:
|
||||||
name: krata-debian-${{ matrix.arch }}
|
name: krata-debian-${{ matrix.arch }}
|
||||||
path: "target/dist/*.deb"
|
path: "target/dist/*.deb"
|
||||||
@ -53,7 +53,7 @@ jobs:
|
|||||||
- name: build apk package
|
- name: build apk package
|
||||||
run: ./hack/dist/apk.sh
|
run: ./hack/dist/apk.sh
|
||||||
- name: upload apk package
|
- name: upload apk package
|
||||||
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
|
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||||
with:
|
with:
|
||||||
name: krata-alpine-${{ matrix.arch }}
|
name: krata-alpine-${{ matrix.arch }}
|
||||||
path: "target/dist/*_${{ matrix.arch }}.apk"
|
path: "target/dist/*_${{ matrix.arch }}.apk"
|
||||||
@ -79,7 +79,7 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: configure git line endings
|
- name: configure git line endings
|
||||||
@ -104,13 +104,13 @@ jobs:
|
|||||||
- name: cargo build kratactl
|
- name: cargo build kratactl
|
||||||
run: ./hack/build/cargo.sh build --release --bin kratactl
|
run: ./hack/build/cargo.sh build --release --bin kratactl
|
||||||
- name: upload kratactl
|
- name: upload kratactl
|
||||||
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
|
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||||
with:
|
with:
|
||||||
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
|
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
|
||||||
path: "target/*/release/kratactl"
|
path: "target/*/release/kratactl"
|
||||||
if: ${{ matrix.platform.os != 'windows' }}
|
if: ${{ matrix.platform.os != 'windows' }}
|
||||||
- name: upload kratactl
|
- name: upload kratactl
|
||||||
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
|
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||||
with:
|
with:
|
||||||
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
|
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
|
||||||
path: "target/*/release/kratactl.exe"
|
path: "target/*/release/kratactl.exe"
|
||||||
@ -132,7 +132,7 @@ jobs:
|
|||||||
packages: write
|
packages: write
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -140,7 +140,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
- name: install cosign
|
- name: install cosign
|
||||||
uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
|
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0
|
||||||
- name: setup docker buildx
|
- name: setup docker buildx
|
||||||
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
|
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
|
||||||
- name: login to container registry
|
- name: login to container registry
|
||||||
@ -150,7 +150,7 @@ jobs:
|
|||||||
username: "${{ github.actor }}"
|
username: "${{ github.actor }}"
|
||||||
password: "${{ secrets.GITHUB_TOKEN }}"
|
password: "${{ secrets.GITHUB_TOKEN }}"
|
||||||
- name: docker build and push ${{ matrix.component }}
|
- name: docker build and push ${{ matrix.component }}
|
||||||
uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0
|
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
|
||||||
id: push
|
id: push
|
||||||
with:
|
with:
|
||||||
file: ./images/Dockerfile.${{ matrix.component }}
|
file: ./images/Dockerfile.${{ matrix.component }}
|
||||||
|
10
.github/workflows/release-assets.yml
vendored
10
.github/workflows/release-assets.yml
vendored
@ -27,7 +27,7 @@ jobs:
|
|||||||
contents: write
|
contents: write
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -81,7 +81,7 @@ jobs:
|
|||||||
contents: write
|
contents: write
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -129,7 +129,7 @@ jobs:
|
|||||||
packages: write
|
packages: write
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: checkout repository
|
- name: checkout repository
|
||||||
@ -137,7 +137,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
- name: install cosign
|
- name: install cosign
|
||||||
uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
|
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0
|
||||||
- name: setup docker buildx
|
- name: setup docker buildx
|
||||||
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
|
uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
|
||||||
- name: login to container registry
|
- name: login to container registry
|
||||||
@ -151,7 +151,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
echo "KRATA_VERSION=$(./hack/dist/version.sh)" >> "${GITHUB_OUTPUT}"
|
echo "KRATA_VERSION=$(./hack/dist/version.sh)" >> "${GITHUB_OUTPUT}"
|
||||||
- name: docker build and push ${{ matrix.component }}
|
- name: docker build and push ${{ matrix.component }}
|
||||||
uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0
|
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
|
||||||
id: push
|
id: push
|
||||||
with:
|
with:
|
||||||
file: ./images/Dockerfile.${{ matrix.component }}
|
file: ./images/Dockerfile.${{ matrix.component }}
|
||||||
|
4
.github/workflows/release-plz.yml
vendored
4
.github/workflows/release-plz.yml
vendored
@ -15,7 +15,7 @@ jobs:
|
|||||||
contents: write
|
contents: write
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0
|
uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
|
||||||
with:
|
with:
|
||||||
egress-policy: audit
|
egress-policy: audit
|
||||||
- name: generate cultivator token
|
- name: generate cultivator token
|
||||||
@ -37,7 +37,7 @@ jobs:
|
|||||||
- name: install linux dependencies
|
- name: install linux dependencies
|
||||||
run: ./hack/ci/install-linux-deps.sh
|
run: ./hack/ci/install-linux-deps.sh
|
||||||
- name: release-plz
|
- name: release-plz
|
||||||
uses: MarcoIeni/release-plz-action@92ae919a6b3e27c0472659e3a7414ff4a00e833f # v0.5.64
|
uses: MarcoIeni/release-plz-action@e28810957ef1fedfa89b5e9692e750ce45f62a67 # v0.5.65
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}"
|
GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}"
|
||||||
CARGO_REGISTRY_TOKEN: "${{ secrets.KRATA_RELEASE_CARGO_TOKEN }}"
|
CARGO_REGISTRY_TOKEN: "${{ secrets.KRATA_RELEASE_CARGO_TOKEN }}"
|
||||||
|
66
CHANGELOG.md
66
CHANGELOG.md
@ -6,6 +6,72 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
|
## [0.0.20](https://github.com/edera-dev/krata/compare/v0.0.19...v0.0.20) - 2024-08-27
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- *(krata)* implement network reservation list ([#366](https://github.com/edera-dev/krata/pull/366))
|
||||||
|
- *(zone-exec)* implement terminal resize support ([#363](https://github.com/edera-dev/krata/pull/363))
|
||||||
|
|
||||||
|
### Other
|
||||||
|
- update Cargo.toml dependencies
|
||||||
|
|
||||||
|
## [0.0.19](https://github.com/edera-dev/krata/compare/v0.0.18...v0.0.19) - 2024-08-25
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- *(config)* write default config to config.toml on startup ([#356](https://github.com/edera-dev/krata/pull/356))
|
||||||
|
- *(ctl)* add --format option to host status and improve cpu topology format ([#355](https://github.com/edera-dev/krata/pull/355))
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- *(zone-exec)* ensure that the underlying process is killed when rpc is closed ([#361](https://github.com/edera-dev/krata/pull/361))
|
||||||
|
- *(rpc)* rename HostStatus to GetHostStatus ([#360](https://github.com/edera-dev/krata/pull/360))
|
||||||
|
- *(console)* don't replay history when attaching to the console ([#358](https://github.com/edera-dev/krata/pull/358))
|
||||||
|
- *(zone-exec)* catch panic errors and show all errors immediately ([#359](https://github.com/edera-dev/krata/pull/359))
|
||||||
|
|
||||||
|
### Other
|
||||||
|
- *(control)* split out all of the rpc calls into their own files ([#357](https://github.com/edera-dev/krata/pull/357))
|
||||||
|
|
||||||
|
## [0.0.18](https://github.com/edera-dev/krata/compare/v0.0.17...v0.0.18) - 2024-08-22
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- *(zone)* kernel command line control on launch ([#351](https://github.com/edera-dev/krata/pull/351))
|
||||||
|
- *(xen-preflight)* test for hypervisor presence explicitly and error if missing ([#347](https://github.com/edera-dev/krata/pull/347))
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- *(network)* allocate host ip from allocation pool ([#353](https://github.com/edera-dev/krata/pull/353))
|
||||||
|
- *(daemon)* turn off trace logging ([#352](https://github.com/edera-dev/krata/pull/352))
|
||||||
|
|
||||||
|
### Other
|
||||||
|
- Add support for reading hypervisor console ([#344](https://github.com/edera-dev/krata/pull/344))
|
||||||
|
- *(ctl)* move logic for branching ctl run steps into ControlCommands ([#342](https://github.com/edera-dev/krata/pull/342))
|
||||||
|
- update Cargo.toml dependencies
|
||||||
|
|
||||||
|
## [0.0.17](https://github.com/edera-dev/krata/compare/v0.0.16...v0.0.17) - 2024-08-15
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- *(krata)* first pass on cpu hotplug support ([#340](https://github.com/edera-dev/krata/pull/340))
|
||||||
|
- *(exec)* implement tty support (fixes [#335](https://github.com/edera-dev/krata/pull/335)) ([#336](https://github.com/edera-dev/krata/pull/336))
|
||||||
|
- *(krata)* dynamic resource allocation (closes [#298](https://github.com/edera-dev/krata/pull/298)) ([#333](https://github.com/edera-dev/krata/pull/333))
|
||||||
|
|
||||||
|
### Other
|
||||||
|
- update Cargo.toml dependencies
|
||||||
|
|
||||||
|
## [0.0.16](https://github.com/edera-dev/krata/compare/v0.0.15...v0.0.16) - 2024-08-14
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- *(krata)* prepare for workload rework ([#276](https://github.com/edera-dev/krata/pull/276))
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- *(idm)* reimplement packet processing algorithm ([#330](https://github.com/edera-dev/krata/pull/330))
|
||||||
|
- *(power-trap-eacces)* gracefully handle hypercall errors in power management ([#325](https://github.com/edera-dev/krata/pull/325))
|
||||||
|
|
||||||
|
### Other
|
||||||
|
- *(o11y)* add more debug logs to daemon & runtime ([#318](https://github.com/edera-dev/krata/pull/318))
|
||||||
|
|
||||||
|
## [0.0.15](https://github.com/edera-dev/krata/compare/v0.0.14...v0.0.15) - 2024-08-06
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- *(zone)* waitpid should be limited when no child processes exist (fixes [#304](https://github.com/edera-dev/krata/pull/304)) ([#305](https://github.com/edera-dev/krata/pull/305))
|
||||||
|
|
||||||
## [0.0.14](https://github.com/edera-dev/krata/compare/v0.0.13...v0.0.14) - 2024-08-06
|
## [0.0.14](https://github.com/edera-dev/krata/compare/v0.0.13...v0.0.14) - 2024-08-06
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
1029
Cargo.lock
generated
1029
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
36
Cargo.toml
36
Cargo.toml
@ -18,14 +18,14 @@ members = [
|
|||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
version = "0.0.14"
|
version = "0.0.20"
|
||||||
homepage = "https://krata.dev"
|
homepage = "https://krata.dev"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
repository = "https://github.com/edera-dev/krata"
|
repository = "https://github.com/edera-dev/krata"
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
arrayvec = "0.7.4"
|
arrayvec = "0.7.6"
|
||||||
async-compression = "0.4.12"
|
async-compression = "0.4.12"
|
||||||
async-stream = "0.3.5"
|
async-stream = "0.3.5"
|
||||||
async-trait = "0.1.81"
|
async-trait = "0.1.81"
|
||||||
@ -37,8 +37,8 @@ c2rust-bitfields = "0.18.0"
|
|||||||
cgroups-rs = "0.3.4"
|
cgroups-rs = "0.3.4"
|
||||||
circular-buffer = "0.1.7"
|
circular-buffer = "0.1.7"
|
||||||
comfy-table = "7.1.1"
|
comfy-table = "7.1.1"
|
||||||
crossterm = "0.27.0"
|
crossterm = "0.28.1"
|
||||||
ctrlc = "3.4.4"
|
ctrlc = "3.4.5"
|
||||||
elf = "0.7.4"
|
elf = "0.7.4"
|
||||||
env_logger = "0.11.5"
|
env_logger = "0.11.5"
|
||||||
etherparse = "0.15.0"
|
etherparse = "0.15.0"
|
||||||
@ -46,9 +46,9 @@ fancy-duration = "0.9.2"
|
|||||||
flate2 = "1.0"
|
flate2 = "1.0"
|
||||||
futures = "0.3.30"
|
futures = "0.3.30"
|
||||||
hyper = "1.4.1"
|
hyper = "1.4.1"
|
||||||
hyper-util = "0.1.6"
|
hyper-util = "0.1.7"
|
||||||
human_bytes = "0.4"
|
human_bytes = "0.4"
|
||||||
indexmap = "2.3.0"
|
indexmap = "2.4.0"
|
||||||
indicatif = "0.17.8"
|
indicatif = "0.17.8"
|
||||||
ipnetwork = "0.20.0"
|
ipnetwork = "0.20.0"
|
||||||
libc = "0.2"
|
libc = "0.2"
|
||||||
@ -68,32 +68,34 @@ prost = "0.13.1"
|
|||||||
prost-build = "0.13.1"
|
prost-build = "0.13.1"
|
||||||
prost-reflect-build = "0.14.0"
|
prost-reflect-build = "0.14.0"
|
||||||
prost-types = "0.13.1"
|
prost-types = "0.13.1"
|
||||||
|
pty-process = "0.4.0"
|
||||||
rand = "0.8.5"
|
rand = "0.8.5"
|
||||||
ratatui = "0.27.0"
|
ratatui = "0.28.1"
|
||||||
redb = "2.1.1"
|
redb = "2.1.2"
|
||||||
regex = "1.10.6"
|
regex = "1.10.6"
|
||||||
rtnetlink = "0.14.1"
|
rtnetlink = "0.14.1"
|
||||||
scopeguard = "1.2.0"
|
scopeguard = "1.2.0"
|
||||||
serde_json = "1.0.122"
|
serde_json = "1.0.127"
|
||||||
serde_yaml = "0.9"
|
serde_yaml = "0.9"
|
||||||
sha256 = "1.5.0"
|
sha256 = "1.5.0"
|
||||||
signal-hook = "0.3.17"
|
signal-hook = "0.3.17"
|
||||||
slice-copy = "0.3.0"
|
slice-copy = "0.3.0"
|
||||||
smoltcp = "0.11.0"
|
smoltcp = "0.11.0"
|
||||||
sysinfo = "0.30.13"
|
sysinfo = "0.31.3"
|
||||||
termtree = "0.5.1"
|
termtree = "0.5.1"
|
||||||
thiserror = "1.0"
|
thiserror = "1.0"
|
||||||
tokio-tun = "0.11.5"
|
tokio-tun = "0.11.5"
|
||||||
|
tokio-util = "0.7.11"
|
||||||
toml = "0.8.19"
|
toml = "0.8.19"
|
||||||
tonic-build = "0.12.1"
|
tonic-build = "0.12.2"
|
||||||
tower = "0.4.13"
|
tower = "0.5.0"
|
||||||
udp-stream = "0.0.12"
|
udp-stream = "0.0.12"
|
||||||
url = "2.5.2"
|
url = "2.5.2"
|
||||||
walkdir = "2"
|
walkdir = "2"
|
||||||
xz2 = "0.1"
|
xz2 = "0.1"
|
||||||
|
|
||||||
[workspace.dependencies.clap]
|
[workspace.dependencies.clap]
|
||||||
version = "4.5.13"
|
version = "4.5.16"
|
||||||
features = ["derive"]
|
features = ["derive"]
|
||||||
|
|
||||||
[workspace.dependencies.prost-reflect]
|
[workspace.dependencies.prost-reflect]
|
||||||
@ -101,12 +103,12 @@ version = "0.14.0"
|
|||||||
features = ["derive"]
|
features = ["derive"]
|
||||||
|
|
||||||
[workspace.dependencies.reqwest]
|
[workspace.dependencies.reqwest]
|
||||||
version = "0.12.5"
|
version = "0.12.7"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["rustls-tls"]
|
features = ["rustls-tls"]
|
||||||
|
|
||||||
[workspace.dependencies.serde]
|
[workspace.dependencies.serde]
|
||||||
version = "1.0.204"
|
version = "1.0.209"
|
||||||
features = ["derive"]
|
features = ["derive"]
|
||||||
|
|
||||||
[workspace.dependencies.sys-mount]
|
[workspace.dependencies.sys-mount]
|
||||||
@ -114,7 +116,7 @@ version = "3.0.0"
|
|||||||
default-features = false
|
default-features = false
|
||||||
|
|
||||||
[workspace.dependencies.tokio]
|
[workspace.dependencies.tokio]
|
||||||
version = "1.39.2"
|
version = "1.39.3"
|
||||||
features = ["full"]
|
features = ["full"]
|
||||||
|
|
||||||
[workspace.dependencies.tokio-stream]
|
[workspace.dependencies.tokio-stream]
|
||||||
@ -122,7 +124,7 @@ version = "0.1"
|
|||||||
features = ["io-util", "net"]
|
features = ["io-util", "net"]
|
||||||
|
|
||||||
[workspace.dependencies.tonic]
|
[workspace.dependencies.tonic]
|
||||||
version = "0.12.1"
|
version = "0.12.2"
|
||||||
features = ["tls"]
|
features = ["tls"]
|
||||||
|
|
||||||
[workspace.dependencies.uuid]
|
[workspace.dependencies.uuid]
|
||||||
|
@ -16,7 +16,7 @@ oci-spec = { workspace = true }
|
|||||||
scopeguard = { workspace = true }
|
scopeguard = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
tokio-stream = { workspace = true }
|
tokio-stream = { workspace = true }
|
||||||
krata-oci = { path = "../oci", version = "^0.0.14" }
|
krata-oci = { path = "../oci", version = "^0.0.20" }
|
||||||
krata-tokio-tar = { workspace = true }
|
krata-tokio-tar = { workspace = true }
|
||||||
uuid = { workspace = true }
|
uuid = { workspace = true }
|
||||||
|
|
||||||
|
@ -20,7 +20,7 @@ env_logger = { workspace = true }
|
|||||||
fancy-duration = { workspace = true }
|
fancy-duration = { workspace = true }
|
||||||
human_bytes = { workspace = true }
|
human_bytes = { workspace = true }
|
||||||
indicatif = { workspace = true }
|
indicatif = { workspace = true }
|
||||||
krata = { path = "../krata", version = "^0.0.14" }
|
krata = { path = "../krata", version = "^0.0.20" }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
prost-reflect = { workspace = true, features = ["serde"] }
|
prost-reflect = { workspace = true, features = ["serde"] }
|
||||||
prost-types = { workspace = true }
|
prost-types = { workspace = true }
|
||||||
|
@ -23,7 +23,7 @@ enum DeviceListFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "List the devices on the isolation engine")]
|
#[command(about = "List device information")]
|
||||||
pub struct DeviceListCommand {
|
pub struct DeviceListCommand {
|
||||||
#[arg(short, long, default_value = "table", help = "Output format")]
|
#[arg(short, long, default_value = "table", help = "Output format")]
|
||||||
format: DeviceListFormat,
|
format: DeviceListFormat,
|
||||||
|
@ -3,9 +3,11 @@ use clap::{Parser, ValueEnum};
|
|||||||
use comfy_table::presets::UTF8_FULL_CONDENSED;
|
use comfy_table::presets::UTF8_FULL_CONDENSED;
|
||||||
use comfy_table::{Cell, Table};
|
use comfy_table::{Cell, Table};
|
||||||
use krata::v1::control::{
|
use krata::v1::control::{
|
||||||
control_service_client::ControlServiceClient, HostCpuTopologyClass, HostCpuTopologyRequest,
|
control_service_client::ControlServiceClient, GetHostCpuTopologyRequest, HostCpuTopologyClass,
|
||||||
};
|
};
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use crate::format::{kv2line, proto2dynamic, proto2kv};
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
fn class_to_str(input: HostCpuTopologyClass) -> String {
|
fn class_to_str(input: HostCpuTopologyClass) -> String {
|
||||||
@ -19,6 +21,11 @@ fn class_to_str(input: HostCpuTopologyClass) -> String {
|
|||||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
enum HostCpuTopologyFormat {
|
enum HostCpuTopologyFormat {
|
||||||
Table,
|
Table,
|
||||||
|
Json,
|
||||||
|
JsonPretty,
|
||||||
|
Jsonl,
|
||||||
|
Yaml,
|
||||||
|
KeyValue,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
@ -31,28 +38,65 @@ pub struct HostCpuTopologyCommand {
|
|||||||
impl HostCpuTopologyCommand {
|
impl HostCpuTopologyCommand {
|
||||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
let response = client
|
let response = client
|
||||||
.get_host_cpu_topology(Request::new(HostCpuTopologyRequest {}))
|
.get_host_cpu_topology(Request::new(GetHostCpuTopologyRequest {}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
|
|
||||||
let mut table = Table::new();
|
match self.format {
|
||||||
table.load_preset(UTF8_FULL_CONDENSED);
|
HostCpuTopologyFormat::Table => {
|
||||||
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
let mut table = Table::new();
|
||||||
table.set_header(vec!["id", "node", "socket", "core", "thread", "class"]);
|
table.load_preset(UTF8_FULL_CONDENSED);
|
||||||
|
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
||||||
|
table.set_header(vec!["id", "node", "socket", "core", "thread", "class"]);
|
||||||
|
|
||||||
for (i, cpu) in response.cpus.iter().enumerate() {
|
for (i, cpu) in response.cpus.iter().enumerate() {
|
||||||
table.add_row(vec![
|
table.add_row(vec![
|
||||||
Cell::new(i),
|
Cell::new(i),
|
||||||
Cell::new(cpu.node),
|
Cell::new(cpu.node),
|
||||||
Cell::new(cpu.socket),
|
Cell::new(cpu.socket),
|
||||||
Cell::new(cpu.core),
|
Cell::new(cpu.core),
|
||||||
Cell::new(cpu.thread),
|
Cell::new(cpu.thread),
|
||||||
Cell::new(class_to_str(cpu.class())),
|
Cell::new(class_to_str(cpu.class())),
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
|
|
||||||
if !table.is_empty() {
|
if !table.is_empty() {
|
||||||
println!("{}", table);
|
println!("{}", table);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
HostCpuTopologyFormat::Json
|
||||||
|
| HostCpuTopologyFormat::JsonPretty
|
||||||
|
| HostCpuTopologyFormat::Yaml => {
|
||||||
|
let mut values = Vec::new();
|
||||||
|
for cpu in response.cpus {
|
||||||
|
let message = proto2dynamic(cpu)?;
|
||||||
|
values.push(serde_json::to_value(message)?);
|
||||||
|
}
|
||||||
|
let value = Value::Array(values);
|
||||||
|
let encoded = if self.format == HostCpuTopologyFormat::JsonPretty {
|
||||||
|
serde_json::to_string_pretty(&value)?
|
||||||
|
} else if self.format == HostCpuTopologyFormat::Yaml {
|
||||||
|
serde_yaml::to_string(&value)?
|
||||||
|
} else {
|
||||||
|
serde_json::to_string(&value)?
|
||||||
|
};
|
||||||
|
println!("{}", encoded.trim());
|
||||||
|
}
|
||||||
|
|
||||||
|
HostCpuTopologyFormat::Jsonl => {
|
||||||
|
for cpu in response.cpus {
|
||||||
|
let message = proto2dynamic(cpu)?;
|
||||||
|
println!("{}", serde_json::to_string(&message)?);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
HostCpuTopologyFormat::KeyValue => {
|
||||||
|
for cpu in response.cpus {
|
||||||
|
let kvs = proto2kv(cpu)?;
|
||||||
|
println!("{}", kv2line(kvs),);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
23
crates/ctl/src/cli/host/hv_console.rs
Normal file
23
crates/ctl/src/cli/host/hv_console.rs
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use clap::Parser;
|
||||||
|
use krata::v1::control::{
|
||||||
|
control_service_client::ControlServiceClient, ReadHypervisorConsoleRequest,
|
||||||
|
};
|
||||||
|
|
||||||
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(about = "Display hypervisor console output")]
|
||||||
|
pub struct HostHvConsoleCommand {}
|
||||||
|
|
||||||
|
impl HostHvConsoleCommand {
|
||||||
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
|
let response = client
|
||||||
|
.read_hypervisor_console(Request::new(ReadHypervisorConsoleRequest {}))
|
||||||
|
.await?
|
||||||
|
.into_inner();
|
||||||
|
|
||||||
|
print!("{}", response.data);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
@ -1,22 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use clap::Parser;
|
|
||||||
use krata::v1::control::{control_service_client::ControlServiceClient, IdentifyHostRequest};
|
|
||||||
|
|
||||||
use tonic::{transport::Channel, Request};
|
|
||||||
|
|
||||||
#[derive(Parser)]
|
|
||||||
#[command(about = "Identify information about the host")]
|
|
||||||
pub struct HostIdentifyCommand {}
|
|
||||||
|
|
||||||
impl HostIdentifyCommand {
|
|
||||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
|
||||||
let response = client
|
|
||||||
.identify_host(Request::new(IdentifyHostRequest {}))
|
|
||||||
.await?
|
|
||||||
.into_inner();
|
|
||||||
println!("Host UUID: {}", response.host_uuid);
|
|
||||||
println!("Host Domain: {}", response.host_domid);
|
|
||||||
println!("Krata Version: {}", response.krata_version);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
@ -6,12 +6,14 @@ use krata::events::EventStream;
|
|||||||
use krata::v1::control::control_service_client::ControlServiceClient;
|
use krata::v1::control::control_service_client::ControlServiceClient;
|
||||||
|
|
||||||
use crate::cli::host::cpu_topology::HostCpuTopologyCommand;
|
use crate::cli::host::cpu_topology::HostCpuTopologyCommand;
|
||||||
use crate::cli::host::identify::HostIdentifyCommand;
|
use crate::cli::host::hv_console::HostHvConsoleCommand;
|
||||||
use crate::cli::host::idm_snoop::HostIdmSnoopCommand;
|
use crate::cli::host::idm_snoop::HostIdmSnoopCommand;
|
||||||
|
use crate::cli::host::status::HostStatusCommand;
|
||||||
|
|
||||||
pub mod cpu_topology;
|
pub mod cpu_topology;
|
||||||
pub mod identify;
|
pub mod hv_console;
|
||||||
pub mod idm_snoop;
|
pub mod idm_snoop;
|
||||||
|
pub mod status;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Manage the host of the isolation engine")]
|
#[command(about = "Manage the host of the isolation engine")]
|
||||||
@ -33,8 +35,9 @@ impl HostCommand {
|
|||||||
#[derive(Subcommand)]
|
#[derive(Subcommand)]
|
||||||
pub enum HostCommands {
|
pub enum HostCommands {
|
||||||
CpuTopology(HostCpuTopologyCommand),
|
CpuTopology(HostCpuTopologyCommand),
|
||||||
Identify(HostIdentifyCommand),
|
Status(HostStatusCommand),
|
||||||
IdmSnoop(HostIdmSnoopCommand),
|
IdmSnoop(HostIdmSnoopCommand),
|
||||||
|
HvConsole(HostHvConsoleCommand),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl HostCommands {
|
impl HostCommands {
|
||||||
@ -46,9 +49,11 @@ impl HostCommands {
|
|||||||
match self {
|
match self {
|
||||||
HostCommands::CpuTopology(cpu_topology) => cpu_topology.run(client).await,
|
HostCommands::CpuTopology(cpu_topology) => cpu_topology.run(client).await,
|
||||||
|
|
||||||
HostCommands::Identify(identify) => identify.run(client).await,
|
HostCommands::Status(status) => status.run(client).await,
|
||||||
|
|
||||||
HostCommands::IdmSnoop(snoop) => snoop.run(client, events).await,
|
HostCommands::IdmSnoop(snoop) => snoop.run(client, events).await,
|
||||||
|
|
||||||
|
HostCommands::HvConsole(hvconsole) => hvconsole.run(client).await,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
60
crates/ctl/src/cli/host/status.rs
Normal file
60
crates/ctl/src/cli/host/status.rs
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use clap::{Parser, ValueEnum};
|
||||||
|
use krata::v1::control::{control_service_client::ControlServiceClient, GetHostStatusRequest};
|
||||||
|
|
||||||
|
use crate::format::{kv2line, proto2dynamic, proto2kv};
|
||||||
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
|
enum HostStatusFormat {
|
||||||
|
Simple,
|
||||||
|
Json,
|
||||||
|
JsonPretty,
|
||||||
|
Yaml,
|
||||||
|
KeyValue,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(about = "Get information about the host")]
|
||||||
|
pub struct HostStatusCommand {
|
||||||
|
#[arg(short, long, default_value = "simple", help = "Output format")]
|
||||||
|
format: HostStatusFormat,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HostStatusCommand {
|
||||||
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
|
let response = client
|
||||||
|
.get_host_status(Request::new(GetHostStatusRequest {}))
|
||||||
|
.await?
|
||||||
|
.into_inner();
|
||||||
|
match self.format {
|
||||||
|
HostStatusFormat::Simple => {
|
||||||
|
println!("Host UUID: {}", response.host_uuid);
|
||||||
|
println!("Host Domain: {}", response.host_domid);
|
||||||
|
println!("Krata Version: {}", response.krata_version);
|
||||||
|
println!("Host IPv4: {}", response.host_ipv4);
|
||||||
|
println!("Host IPv6: {}", response.host_ipv6);
|
||||||
|
println!("Host Ethernet Address: {}", response.host_mac);
|
||||||
|
}
|
||||||
|
|
||||||
|
HostStatusFormat::Json | HostStatusFormat::JsonPretty | HostStatusFormat::Yaml => {
|
||||||
|
let message = proto2dynamic(response)?;
|
||||||
|
let value = serde_json::to_value(message)?;
|
||||||
|
let encoded = if self.format == HostStatusFormat::JsonPretty {
|
||||||
|
serde_json::to_string_pretty(&value)?
|
||||||
|
} else if self.format == HostStatusFormat::Yaml {
|
||||||
|
serde_yaml::to_string(&value)?
|
||||||
|
} else {
|
||||||
|
serde_json::to_string(&value)?
|
||||||
|
};
|
||||||
|
println!("{}", encoded.trim());
|
||||||
|
}
|
||||||
|
|
||||||
|
HostStatusFormat::KeyValue => {
|
||||||
|
let kvs = proto2kv(response)?;
|
||||||
|
println!("{}", kv2line(kvs),);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
@ -1,6 +1,7 @@
|
|||||||
pub mod device;
|
pub mod device;
|
||||||
pub mod host;
|
pub mod host;
|
||||||
pub mod image;
|
pub mod image;
|
||||||
|
pub mod network;
|
||||||
pub mod zone;
|
pub mod zone;
|
||||||
|
|
||||||
use crate::cli::device::DeviceCommand;
|
use crate::cli::device::DeviceCommand;
|
||||||
@ -12,8 +13,9 @@ use clap::Parser;
|
|||||||
use krata::{
|
use krata::{
|
||||||
client::ControlClientProvider,
|
client::ControlClientProvider,
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest},
|
v1::control::{control_service_client::ControlServiceClient, ResolveZoneIdRequest},
|
||||||
};
|
};
|
||||||
|
use network::NetworkCommand;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
@ -31,10 +33,12 @@ pub struct ControlCommand {
|
|||||||
command: ControlCommands,
|
command: ControlCommands,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::large_enum_variant)]
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
pub enum ControlCommands {
|
pub enum ControlCommands {
|
||||||
Zone(ZoneCommand),
|
Zone(ZoneCommand),
|
||||||
Image(ImageCommand),
|
Image(ImageCommand),
|
||||||
|
Network(NetworkCommand),
|
||||||
Device(DeviceCommand),
|
Device(DeviceCommand),
|
||||||
Host(HostCommand),
|
Host(HostCommand),
|
||||||
}
|
}
|
||||||
@ -43,15 +47,26 @@ impl ControlCommand {
|
|||||||
pub async fn run(self) -> Result<()> {
|
pub async fn run(self) -> Result<()> {
|
||||||
let client = ControlClientProvider::dial(self.connection.parse()?).await?;
|
let client = ControlClientProvider::dial(self.connection.parse()?).await?;
|
||||||
let events = EventStream::open(client.clone()).await?;
|
let events = EventStream::open(client.clone()).await?;
|
||||||
|
self.command.run(client, events).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
match self.command {
|
impl ControlCommands {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
client: ControlServiceClient<Channel>,
|
||||||
|
events: EventStream,
|
||||||
|
) -> Result<()> {
|
||||||
|
match self {
|
||||||
ControlCommands::Zone(zone) => zone.run(client, events).await,
|
ControlCommands::Zone(zone) => zone.run(client, events).await,
|
||||||
|
|
||||||
|
ControlCommands::Network(network) => network.run(client, events).await,
|
||||||
|
|
||||||
ControlCommands::Image(image) => image.run(client, events).await,
|
ControlCommands::Image(image) => image.run(client, events).await,
|
||||||
|
|
||||||
ControlCommands::Device(device) => device.run(client, events).await,
|
ControlCommands::Device(device) => device.run(client, events).await,
|
||||||
|
|
||||||
ControlCommands::Host(snoop) => snoop.run(client, events).await,
|
ControlCommands::Host(host) => host.run(client, events).await,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -61,14 +76,14 @@ pub async fn resolve_zone(
|
|||||||
name: &str,
|
name: &str,
|
||||||
) -> Result<String> {
|
) -> Result<String> {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_zone(Request::new(ResolveZoneRequest {
|
.resolve_zone_id(Request::new(ResolveZoneIdRequest {
|
||||||
name: name.to_string(),
|
name: name.to_string(),
|
||||||
}))
|
}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
|
|
||||||
if let Some(zone) = reply.zone {
|
if !reply.zone_id.is_empty() {
|
||||||
Ok(zone.id)
|
Ok(reply.zone_id)
|
||||||
} else {
|
} else {
|
||||||
Err(anyhow!("unable to resolve zone '{}'", name))
|
Err(anyhow!("unable to resolve zone '{}'", name))
|
||||||
}
|
}
|
||||||
|
43
crates/ctl/src/cli/network/mod.rs
Normal file
43
crates/ctl/src/cli/network/mod.rs
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use clap::{Parser, Subcommand};
|
||||||
|
use reservation::NetworkReservationCommand;
|
||||||
|
use tonic::transport::Channel;
|
||||||
|
|
||||||
|
use krata::events::EventStream;
|
||||||
|
use krata::v1::control::control_service_client::ControlServiceClient;
|
||||||
|
|
||||||
|
pub mod reservation;
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(about = "Manage the network on the isolation engine")]
|
||||||
|
pub struct NetworkCommand {
|
||||||
|
#[command(subcommand)]
|
||||||
|
subcommand: NetworkCommands,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkCommand {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
client: ControlServiceClient<Channel>,
|
||||||
|
events: EventStream,
|
||||||
|
) -> Result<()> {
|
||||||
|
self.subcommand.run(client, events).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Subcommand)]
|
||||||
|
pub enum NetworkCommands {
|
||||||
|
Reservation(NetworkReservationCommand),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkCommands {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
client: ControlServiceClient<Channel>,
|
||||||
|
events: EventStream,
|
||||||
|
) -> Result<()> {
|
||||||
|
match self {
|
||||||
|
NetworkCommands::Reservation(reservation) => reservation.run(client, events).await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
125
crates/ctl/src/cli/network/reservation/list.rs
Normal file
125
crates/ctl/src/cli/network/reservation/list.rs
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use clap::{Parser, ValueEnum};
|
||||||
|
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Table};
|
||||||
|
use krata::{
|
||||||
|
events::EventStream,
|
||||||
|
v1::{
|
||||||
|
common::NetworkReservation,
|
||||||
|
control::{control_service_client::ControlServiceClient, ListNetworkReservationsRequest},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use serde_json::Value;
|
||||||
|
use tonic::transport::Channel;
|
||||||
|
|
||||||
|
use crate::format::{kv2line, proto2dynamic, proto2kv};
|
||||||
|
|
||||||
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
|
enum NetworkReservationListFormat {
|
||||||
|
Table,
|
||||||
|
Json,
|
||||||
|
JsonPretty,
|
||||||
|
Jsonl,
|
||||||
|
Yaml,
|
||||||
|
KeyValue,
|
||||||
|
Simple,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(about = "List network reservation information")]
|
||||||
|
pub struct NetworkReservationListCommand {
|
||||||
|
#[arg(short, long, default_value = "table", help = "Output format")]
|
||||||
|
format: NetworkReservationListFormat,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkReservationListCommand {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
mut client: ControlServiceClient<Channel>,
|
||||||
|
_events: EventStream,
|
||||||
|
) -> Result<()> {
|
||||||
|
let reply = client
|
||||||
|
.list_network_reservations(ListNetworkReservationsRequest {})
|
||||||
|
.await?
|
||||||
|
.into_inner();
|
||||||
|
let mut reservations = reply.reservations;
|
||||||
|
|
||||||
|
reservations.sort_by(|a, b| a.uuid.cmp(&b.uuid));
|
||||||
|
|
||||||
|
match self.format {
|
||||||
|
NetworkReservationListFormat::Table => {
|
||||||
|
self.print_reservations_table(reservations)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
NetworkReservationListFormat::Simple => {
|
||||||
|
for reservation in reservations {
|
||||||
|
println!(
|
||||||
|
"{}\t{}\t{}\t{}",
|
||||||
|
reservation.uuid, reservation.ipv4, reservation.ipv6, reservation.mac
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
NetworkReservationListFormat::Json
|
||||||
|
| NetworkReservationListFormat::JsonPretty
|
||||||
|
| NetworkReservationListFormat::Yaml => {
|
||||||
|
let mut values = Vec::new();
|
||||||
|
for device in reservations {
|
||||||
|
let message = proto2dynamic(device)?;
|
||||||
|
values.push(serde_json::to_value(message)?);
|
||||||
|
}
|
||||||
|
let value = Value::Array(values);
|
||||||
|
let encoded = if self.format == NetworkReservationListFormat::JsonPretty {
|
||||||
|
serde_json::to_string_pretty(&value)?
|
||||||
|
} else if self.format == NetworkReservationListFormat::Yaml {
|
||||||
|
serde_yaml::to_string(&value)?
|
||||||
|
} else {
|
||||||
|
serde_json::to_string(&value)?
|
||||||
|
};
|
||||||
|
println!("{}", encoded.trim());
|
||||||
|
}
|
||||||
|
|
||||||
|
NetworkReservationListFormat::Jsonl => {
|
||||||
|
for device in reservations {
|
||||||
|
let message = proto2dynamic(device)?;
|
||||||
|
println!("{}", serde_json::to_string(&message)?);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
NetworkReservationListFormat::KeyValue => {
|
||||||
|
self.print_key_value(reservations)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn print_reservations_table(&self, reservations: Vec<NetworkReservation>) -> Result<()> {
|
||||||
|
let mut table = Table::new();
|
||||||
|
table.load_preset(UTF8_FULL_CONDENSED);
|
||||||
|
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
||||||
|
table.set_header(vec!["uuid", "ipv4", "ipv6", "mac"]);
|
||||||
|
for reservation in reservations {
|
||||||
|
table.add_row(vec![
|
||||||
|
Cell::new(reservation.uuid),
|
||||||
|
Cell::new(reservation.ipv4),
|
||||||
|
Cell::new(reservation.ipv6),
|
||||||
|
Cell::new(reservation.mac),
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
if table.is_empty() {
|
||||||
|
println!("no network reservations found");
|
||||||
|
} else {
|
||||||
|
println!("{}", table);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn print_key_value(&self, reservations: Vec<NetworkReservation>) -> Result<()> {
|
||||||
|
for reservation in reservations {
|
||||||
|
let kvs = proto2kv(reservation)?;
|
||||||
|
println!("{}", kv2line(kvs));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
43
crates/ctl/src/cli/network/reservation/mod.rs
Normal file
43
crates/ctl/src/cli/network/reservation/mod.rs
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use clap::{Parser, Subcommand};
|
||||||
|
use list::NetworkReservationListCommand;
|
||||||
|
use tonic::transport::Channel;
|
||||||
|
|
||||||
|
use krata::events::EventStream;
|
||||||
|
use krata::v1::control::control_service_client::ControlServiceClient;
|
||||||
|
|
||||||
|
pub mod list;
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(about = "Manage network reservations")]
|
||||||
|
pub struct NetworkReservationCommand {
|
||||||
|
#[command(subcommand)]
|
||||||
|
subcommand: NetworkReservationCommands,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkReservationCommand {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
client: ControlServiceClient<Channel>,
|
||||||
|
events: EventStream,
|
||||||
|
) -> Result<()> {
|
||||||
|
self.subcommand.run(client, events).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Subcommand)]
|
||||||
|
pub enum NetworkReservationCommands {
|
||||||
|
List(NetworkReservationListCommand),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkReservationCommands {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
client: ControlServiceClient<Channel>,
|
||||||
|
events: EventStream,
|
||||||
|
) -> Result<()> {
|
||||||
|
match self {
|
||||||
|
NetworkReservationCommands::List(list) => list.run(client, events).await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -23,10 +23,10 @@ impl ZoneAttachCommand {
|
|||||||
events: EventStream,
|
events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let input = StdioConsoleStream::stdin_stream(zone_id.clone()).await;
|
let input = StdioConsoleStream::stdin_stream(zone_id.clone(), false).await;
|
||||||
let output = client.attach_zone_console(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, true).await });
|
||||||
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
|
||||||
let code = select! {
|
let code = select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
|
@ -2,20 +2,16 @@ use anyhow::Result;
|
|||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::control::{
|
||||||
common::ZoneStatus,
|
control_service_client::ControlServiceClient, watch_events_reply::Event, DestroyZoneRequest,
|
||||||
control::{
|
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
|
||||||
DestroyZoneRequest,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use crate::cli::resolve_zone;
|
||||||
|
use krata::v1::common::ZoneState;
|
||||||
use log::error;
|
use log::error;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::cli::resolve_zone;
|
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Destroy a zone")]
|
#[command(about = "Destroy a zone")]
|
||||||
pub struct ZoneDestroyCommand {
|
pub struct ZoneDestroyCommand {
|
||||||
@ -61,12 +57,12 @@ async fn wait_zone_destroyed(id: &str, events: EventStream) -> Result<()> {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(state) = zone.state else {
|
let Some(status) = zone.status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ref error) = state.error_info {
|
if let Some(ref error) = status.error_status {
|
||||||
if state.status() == ZoneStatus::Failed {
|
if status.state() == ZoneState::Failed {
|
||||||
error!("destroy failed: {}", error.message);
|
error!("destroy failed: {}", error.message);
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
} else {
|
} else {
|
||||||
@ -74,7 +70,7 @@ async fn wait_zone_destroyed(id: &str, events: EventStream) -> Result<()> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == ZoneStatus::Destroyed {
|
if status.state() == ZoneState::Destroyed {
|
||||||
std::process::exit(0);
|
std::process::exit(0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3,11 +3,13 @@ use std::collections::HashMap;
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
|
use crossterm::tty::IsTty;
|
||||||
use krata::v1::{
|
use krata::v1::{
|
||||||
common::{ZoneTaskSpec, ZoneTaskSpecEnvVar},
|
common::{TerminalSize, ZoneTaskSpec, ZoneTaskSpecEnvVar},
|
||||||
control::{control_service_client::ControlServiceClient, ExecZoneRequest},
|
control::{control_service_client::ControlServiceClient, ExecInsideZoneRequest},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use tokio::io::stdin;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::console::StdioConsoleStream;
|
use crate::console::StdioConsoleStream;
|
||||||
@ -21,6 +23,8 @@ pub struct ZoneExecCommand {
|
|||||||
env: Option<Vec<String>>,
|
env: Option<Vec<String>>,
|
||||||
#[arg(short = 'w', long, help = "Working directory")]
|
#[arg(short = 'w', long, help = "Working directory")]
|
||||||
working_directory: Option<String>,
|
working_directory: Option<String>,
|
||||||
|
#[arg(short = 't', long, help = "Allocate tty")]
|
||||||
|
tty: bool,
|
||||||
#[arg(help = "Zone to exec inside, either the name or the uuid")]
|
#[arg(help = "Zone to exec inside, either the name or the uuid")]
|
||||||
zone: String,
|
zone: String,
|
||||||
#[arg(
|
#[arg(
|
||||||
@ -34,7 +38,8 @@ pub struct ZoneExecCommand {
|
|||||||
impl ZoneExecCommand {
|
impl ZoneExecCommand {
|
||||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let initial = ExecZoneRequest {
|
let should_map_tty = self.tty && stdin().is_tty();
|
||||||
|
let initial = ExecInsideZoneRequest {
|
||||||
zone_id,
|
zone_id,
|
||||||
task: Some(ZoneTaskSpec {
|
task: Some(ZoneTaskSpec {
|
||||||
environment: env_map(&self.env.unwrap_or_default())
|
environment: env_map(&self.env.unwrap_or_default())
|
||||||
@ -46,15 +51,29 @@ impl ZoneExecCommand {
|
|||||||
.collect(),
|
.collect(),
|
||||||
command: self.command,
|
command: self.command,
|
||||||
working_directory: self.working_directory.unwrap_or_default(),
|
working_directory: self.working_directory.unwrap_or_default(),
|
||||||
|
tty: self.tty,
|
||||||
}),
|
}),
|
||||||
data: vec![],
|
stdin: vec![],
|
||||||
|
stdin_closed: false,
|
||||||
|
terminal_size: if should_map_tty {
|
||||||
|
let size = crossterm::terminal::size().ok();
|
||||||
|
size.map(|(columns, rows)| TerminalSize {
|
||||||
|
rows: rows as u32,
|
||||||
|
columns: columns as u32,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
|
let stream = StdioConsoleStream::input_stream_exec(initial, should_map_tty).await;
|
||||||
|
|
||||||
let response = client.exec_zone(Request::new(stream)).await?.into_inner();
|
let response = client
|
||||||
|
.exec_inside_zone(Request::new(stream))
|
||||||
|
.await?
|
||||||
|
.into_inner();
|
||||||
|
|
||||||
let code = StdioConsoleStream::exec_output(response).await?;
|
let code = StdioConsoleStream::exec_output(response, should_map_tty).await?;
|
||||||
std::process::exit(code);
|
std::process::exit(code);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -6,8 +6,9 @@ use krata::{
|
|||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{
|
common::{
|
||||||
zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneOciImageSpec, ZoneSpec,
|
zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneKernelOptionsSpec,
|
||||||
ZoneSpecDevice, ZoneStatus, ZoneTaskSpec, ZoneTaskSpecEnvVar,
|
ZoneOciImageSpec, ZoneResourceSpec, ZoneSpec, ZoneSpecDevice, ZoneState, ZoneTaskSpec,
|
||||||
|
ZoneTaskSpecEnvVar,
|
||||||
},
|
},
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
@ -38,19 +39,40 @@ pub struct ZoneLaunchCommand {
|
|||||||
pull_update: bool,
|
pull_update: bool,
|
||||||
#[arg(short, long, help = "Name of the zone")]
|
#[arg(short, long, help = "Name of the zone")]
|
||||||
name: Option<String>,
|
name: Option<String>,
|
||||||
#[arg(short, long, default_value_t = 1, help = "vCPUs available to the zone")]
|
|
||||||
cpus: u32,
|
|
||||||
#[arg(
|
#[arg(
|
||||||
short,
|
short = 'C',
|
||||||
long,
|
long = "max-cpus",
|
||||||
default_value_t = 512,
|
default_value_t = 4,
|
||||||
help = "Memory available to the zone, in megabytes"
|
help = "Maximum vCPUs available for the zone"
|
||||||
)]
|
)]
|
||||||
mem: u64,
|
max_cpus: u32,
|
||||||
|
#[arg(
|
||||||
|
short = 'c',
|
||||||
|
long = "target-cpus",
|
||||||
|
default_value_t = 1,
|
||||||
|
help = "Target vCPUs for the zone to use"
|
||||||
|
)]
|
||||||
|
target_cpus: u32,
|
||||||
|
#[arg(
|
||||||
|
short = 'M',
|
||||||
|
long = "max-memory",
|
||||||
|
default_value_t = 1024,
|
||||||
|
help = "Maximum memory available to the zone, in megabytes"
|
||||||
|
)]
|
||||||
|
max_memory: u64,
|
||||||
|
#[arg(
|
||||||
|
short = 'm',
|
||||||
|
long = "target-memory",
|
||||||
|
default_value_t = 1024,
|
||||||
|
help = "Target memory for the zone to use, in megabytes"
|
||||||
|
)]
|
||||||
|
target_memory: u64,
|
||||||
#[arg[short = 'D', long = "device", help = "Devices to request for the zone"]]
|
#[arg[short = 'D', long = "device", help = "Devices to request for the zone"]]
|
||||||
device: Vec<String>,
|
device: Vec<String>,
|
||||||
#[arg[short, long, help = "Environment variables set in the zone"]]
|
#[arg[short, long, help = "Environment variables set in the zone"]]
|
||||||
env: Option<Vec<String>>,
|
env: Option<Vec<String>>,
|
||||||
|
#[arg(short = 't', long, help = "Allocate tty for task")]
|
||||||
|
tty: bool,
|
||||||
#[arg(
|
#[arg(
|
||||||
short,
|
short,
|
||||||
long,
|
long,
|
||||||
@ -69,6 +91,10 @@ pub struct ZoneLaunchCommand {
|
|||||||
initrd: Option<String>,
|
initrd: Option<String>,
|
||||||
#[arg(short = 'w', long, help = "Working directory")]
|
#[arg(short = 'w', long, help = "Working directory")]
|
||||||
working_directory: Option<String>,
|
working_directory: Option<String>,
|
||||||
|
#[arg(long, help = "Enable verbose logging on the kernel")]
|
||||||
|
kernel_verbose: bool,
|
||||||
|
#[arg(long, help = "Additional kernel cmdline options")]
|
||||||
|
kernel_cmdline_append: Option<String>,
|
||||||
#[arg(help = "Container image for zone to use")]
|
#[arg(help = "Container image for zone to use")]
|
||||||
oci: String,
|
oci: String,
|
||||||
#[arg(
|
#[arg(
|
||||||
@ -120,8 +146,12 @@ impl ZoneLaunchCommand {
|
|||||||
image: Some(image),
|
image: Some(image),
|
||||||
kernel,
|
kernel,
|
||||||
initrd,
|
initrd,
|
||||||
vcpus: self.cpus,
|
initial_resources: Some(ZoneResourceSpec {
|
||||||
mem: self.mem,
|
max_memory: self.max_memory,
|
||||||
|
target_memory: self.target_memory,
|
||||||
|
max_cpus: self.max_cpus,
|
||||||
|
target_cpus: self.target_cpus,
|
||||||
|
}),
|
||||||
task: Some(ZoneTaskSpec {
|
task: Some(ZoneTaskSpec {
|
||||||
environment: env_map(&self.env.unwrap_or_default())
|
environment: env_map(&self.env.unwrap_or_default())
|
||||||
.iter()
|
.iter()
|
||||||
@ -132,6 +162,7 @@ impl ZoneLaunchCommand {
|
|||||||
.collect(),
|
.collect(),
|
||||||
command: self.command,
|
command: self.command,
|
||||||
working_directory: self.working_directory.unwrap_or_default(),
|
working_directory: self.working_directory.unwrap_or_default(),
|
||||||
|
tty: self.tty,
|
||||||
}),
|
}),
|
||||||
annotations: vec![],
|
annotations: vec![],
|
||||||
devices: self
|
devices: self
|
||||||
@ -139,6 +170,10 @@ impl ZoneLaunchCommand {
|
|||||||
.iter()
|
.iter()
|
||||||
.map(|name| ZoneSpecDevice { name: name.clone() })
|
.map(|name| ZoneSpecDevice { name: name.clone() })
|
||||||
.collect(),
|
.collect(),
|
||||||
|
kernel_options: Some(ZoneKernelOptionsSpec {
|
||||||
|
verbose: self.kernel_verbose,
|
||||||
|
cmdline_append: self.kernel_cmdline_append.clone().unwrap_or_default(),
|
||||||
|
}),
|
||||||
}),
|
}),
|
||||||
};
|
};
|
||||||
let response = client
|
let response = client
|
||||||
@ -152,10 +187,10 @@ impl ZoneLaunchCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let code = if self.attach {
|
let code = if self.attach {
|
||||||
let input = StdioConsoleStream::stdin_stream(id.clone()).await;
|
let input = StdioConsoleStream::stdin_stream(id.clone(), true).await;
|
||||||
let output = client.attach_zone_console(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, true).await });
|
||||||
let exit_hook_task = StdioConsoleStream::zone_exit_hook(id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(id.clone(), events).await?;
|
||||||
select! {
|
select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
@ -209,12 +244,12 @@ async fn wait_zone_started(id: &str, events: EventStream) -> Result<()> {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(state) = zone.state else {
|
let Some(status) = zone.status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ref error) = state.error_info {
|
if let Some(ref error) = status.error_status {
|
||||||
if state.status() == ZoneStatus::Failed {
|
if status.state() == ZoneState::Failed {
|
||||||
error!("launch failed: {}", error.message);
|
error!("launch failed: {}", error.message);
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
} else {
|
} else {
|
||||||
@ -222,12 +257,12 @@ async fn wait_zone_started(id: &str, events: EventStream) -> Result<()> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == ZoneStatus::Destroyed {
|
if status.state() == ZoneState::Destroyed {
|
||||||
error!("zone destroyed");
|
error!("zone destroyed");
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == ZoneStatus::Started {
|
if status.state() == ZoneState::Created {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,18 +4,19 @@ use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{Zone, ZoneStatus},
|
common::Zone,
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, ListZonesRequest, ResolveZoneRequest,
|
control_service_client::ControlServiceClient, ListZonesRequest, ResolveZoneIdRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line, zone_state_text};
|
||||||
|
use krata::v1::common::ZoneState;
|
||||||
|
use krata::v1::control::GetZoneRequest;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line, zone_status_text};
|
|
||||||
|
|
||||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
enum ZoneListFormat {
|
enum ZoneListFormat {
|
||||||
Table,
|
Table,
|
||||||
@ -28,7 +29,7 @@ enum ZoneListFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "List the zones on the isolation engine")]
|
#[command(about = "List zone information")]
|
||||||
pub struct ZoneListCommand {
|
pub struct ZoneListCommand {
|
||||||
#[arg(short, long, default_value = "table", help = "Output format")]
|
#[arg(short, long, default_value = "table", help = "Output format")]
|
||||||
format: ZoneListFormat,
|
format: ZoneListFormat,
|
||||||
@ -44,11 +45,21 @@ impl ZoneListCommand {
|
|||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut zones = if let Some(ref zone) = self.zone {
|
let mut zones = if let Some(ref zone) = self.zone {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_zone(Request::new(ResolveZoneRequest { name: zone.clone() }))
|
.resolve_zone_id(Request::new(ResolveZoneIdRequest { name: zone.clone() }))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
if let Some(zone) = reply.zone {
|
if !reply.zone_id.is_empty() {
|
||||||
vec![zone]
|
let reply = client
|
||||||
|
.get_zone(Request::new(GetZoneRequest {
|
||||||
|
zone_id: reply.zone_id,
|
||||||
|
}))
|
||||||
|
.await?
|
||||||
|
.into_inner();
|
||||||
|
if let Some(zone) = reply.zone {
|
||||||
|
vec![zone]
|
||||||
|
} else {
|
||||||
|
return Err(anyhow!("unable to resolve zone '{}'", zone));
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
return Err(anyhow!("unable to resolve zone '{}'", zone));
|
return Err(anyhow!("unable to resolve zone '{}'", zone));
|
||||||
}
|
}
|
||||||
@ -115,30 +126,30 @@ impl ZoneListCommand {
|
|||||||
let mut table = Table::new();
|
let mut table = Table::new();
|
||||||
table.load_preset(UTF8_FULL_CONDENSED);
|
table.load_preset(UTF8_FULL_CONDENSED);
|
||||||
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
||||||
table.set_header(vec!["name", "uuid", "status", "ipv4", "ipv6"]);
|
table.set_header(vec!["name", "uuid", "state", "ipv4", "ipv6"]);
|
||||||
for zone in zones {
|
for zone in zones {
|
||||||
let ipv4 = zone
|
let ipv4 = zone
|
||||||
.state
|
.status
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|x| x.network.as_ref())
|
.and_then(|x| x.network_status.as_ref())
|
||||||
.map(|x| x.zone_ipv4.as_str())
|
.map(|x| x.zone_ipv4.as_str())
|
||||||
.unwrap_or("n/a");
|
.unwrap_or("n/a");
|
||||||
let ipv6 = zone
|
let ipv6 = zone
|
||||||
.state
|
.status
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|x| x.network.as_ref())
|
.and_then(|x| x.network_status.as_ref())
|
||||||
.map(|x| x.zone_ipv6.as_str())
|
.map(|x| x.zone_ipv6.as_str())
|
||||||
.unwrap_or("n/a");
|
.unwrap_or("n/a");
|
||||||
let Some(spec) = zone.spec else {
|
let Some(spec) = zone.spec else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
let status = zone.state.as_ref().cloned().unwrap_or_default().status();
|
let state = zone.status.as_ref().cloned().unwrap_or_default().state();
|
||||||
let status_text = zone_status_text(status);
|
let status_text = zone_state_text(state);
|
||||||
|
|
||||||
let status_color = match status {
|
let status_color = match state {
|
||||||
ZoneStatus::Destroyed | ZoneStatus::Failed => Color::Red,
|
ZoneState::Destroyed | ZoneState::Failed => Color::Red,
|
||||||
ZoneStatus::Destroying | ZoneStatus::Exited | ZoneStatus::Starting => Color::Yellow,
|
ZoneState::Destroying | ZoneState::Exited | ZoneState::Creating => Color::Yellow,
|
||||||
ZoneStatus::Started => Color::Green,
|
ZoneState::Created => Color::Green,
|
||||||
_ => Color::Reset,
|
_ => Color::Reset,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -33,7 +33,7 @@ impl ZoneLogsCommand {
|
|||||||
let zone_id_stream = zone_id.clone();
|
let zone_id_stream = zone_id.clone();
|
||||||
let follow = self.follow;
|
let follow = self.follow;
|
||||||
let input = stream! {
|
let input = stream! {
|
||||||
yield ZoneConsoleRequest { zone_id: zone_id_stream, data: Vec::new() };
|
yield ZoneConsoleRequest { zone_id: zone_id_stream, replay_history: true, data: Vec::new() };
|
||||||
if follow {
|
if follow {
|
||||||
let mut pending = pending::<ZoneConsoleRequest>();
|
let mut pending = pending::<ZoneConsoleRequest>();
|
||||||
while let Some(x) = pending.next().await {
|
while let Some(x) = pending.next().await {
|
||||||
@ -43,7 +43,7 @@ impl ZoneLogsCommand {
|
|||||||
};
|
};
|
||||||
let output = client.attach_zone_console(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output, false).await });
|
||||||
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
|
||||||
let code = select! {
|
let code = select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
|
@ -14,6 +14,7 @@ use crate::cli::zone::logs::ZoneLogsCommand;
|
|||||||
use crate::cli::zone::metrics::ZoneMetricsCommand;
|
use crate::cli::zone::metrics::ZoneMetricsCommand;
|
||||||
use crate::cli::zone::resolve::ZoneResolveCommand;
|
use crate::cli::zone::resolve::ZoneResolveCommand;
|
||||||
use crate::cli::zone::top::ZoneTopCommand;
|
use crate::cli::zone::top::ZoneTopCommand;
|
||||||
|
use crate::cli::zone::update_resources::ZoneUpdateResourcesCommand;
|
||||||
use crate::cli::zone::watch::ZoneWatchCommand;
|
use crate::cli::zone::watch::ZoneWatchCommand;
|
||||||
|
|
||||||
pub mod attach;
|
pub mod attach;
|
||||||
@ -25,6 +26,7 @@ pub mod logs;
|
|||||||
pub mod metrics;
|
pub mod metrics;
|
||||||
pub mod resolve;
|
pub mod resolve;
|
||||||
pub mod top;
|
pub mod top;
|
||||||
|
pub mod update_resources;
|
||||||
pub mod watch;
|
pub mod watch;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
@ -44,6 +46,7 @@ impl ZoneCommand {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::large_enum_variant)]
|
||||||
#[derive(Subcommand)]
|
#[derive(Subcommand)]
|
||||||
pub enum ZoneCommands {
|
pub enum ZoneCommands {
|
||||||
Attach(ZoneAttachCommand),
|
Attach(ZoneAttachCommand),
|
||||||
@ -56,6 +59,7 @@ pub enum ZoneCommands {
|
|||||||
Resolve(ZoneResolveCommand),
|
Resolve(ZoneResolveCommand),
|
||||||
Top(ZoneTopCommand),
|
Top(ZoneTopCommand),
|
||||||
Watch(ZoneWatchCommand),
|
Watch(ZoneWatchCommand),
|
||||||
|
UpdateResources(ZoneUpdateResourcesCommand),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ZoneCommands {
|
impl ZoneCommands {
|
||||||
@ -84,6 +88,8 @@ impl ZoneCommands {
|
|||||||
ZoneCommands::Top(top) => top.run(client, events).await,
|
ZoneCommands::Top(top) => top.run(client, events).await,
|
||||||
|
|
||||||
ZoneCommands::Exec(exec) => exec.run(client).await,
|
ZoneCommands::Exec(exec) => exec.run(client).await,
|
||||||
|
|
||||||
|
ZoneCommands::UpdateResources(update_resources) => update_resources.run(client).await,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use krata::v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest};
|
use krata::v1::control::{control_service_client::ControlServiceClient, ResolveZoneIdRequest};
|
||||||
|
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
@ -14,13 +14,13 @@ pub struct ZoneResolveCommand {
|
|||||||
impl ZoneResolveCommand {
|
impl ZoneResolveCommand {
|
||||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_zone(Request::new(ResolveZoneRequest {
|
.resolve_zone_id(Request::new(ResolveZoneIdRequest {
|
||||||
name: self.zone.clone(),
|
name: self.zone.clone(),
|
||||||
}))
|
}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
if let Some(zone) = reply.zone {
|
if !reply.zone_id.is_empty() {
|
||||||
println!("{}", zone.id);
|
println!("{}", reply.zone_id);
|
||||||
} else {
|
} else {
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
}
|
}
|
||||||
|
@ -24,7 +24,7 @@ use ratatui::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
format::zone_status_text,
|
format::zone_state_text,
|
||||||
metrics::{
|
metrics::{
|
||||||
lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
|
lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
|
||||||
},
|
},
|
||||||
@ -106,13 +106,13 @@ impl ZoneTopApp {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn render_frame(&mut self, frame: &mut Frame) {
|
fn render_frame(&mut self, frame: &mut Frame) {
|
||||||
frame.render_widget(self, frame.size());
|
frame.render_widget(self, frame.area());
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_event(&mut self, event: Event) -> io::Result<()> {
|
fn handle_event(&mut self, event: Event) -> io::Result<()> {
|
||||||
@ -157,7 +157,7 @@ impl Widget for &mut ZoneTopApp {
|
|||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(ref state) = ms.zone.state else {
|
let Some(ref status) = ms.zone.status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -177,7 +177,7 @@ impl Widget for &mut ZoneTopApp {
|
|||||||
let row = Row::new(vec![
|
let row = Row::new(vec![
|
||||||
spec.name.clone(),
|
spec.name.clone(),
|
||||||
ms.zone.id.clone(),
|
ms.zone.id.clone(),
|
||||||
zone_status_text(state.status()),
|
zone_state_text(status.state()),
|
||||||
memory_total.unwrap_or_default(),
|
memory_total.unwrap_or_default(),
|
||||||
memory_used.unwrap_or_default(),
|
memory_used.unwrap_or_default(),
|
||||||
memory_free.unwrap_or_default(),
|
memory_free.unwrap_or_default(),
|
||||||
|
93
crates/ctl/src/cli/zone/update_resources.rs
Normal file
93
crates/ctl/src/cli/zone/update_resources.rs
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use clap::Parser;
|
||||||
|
use krata::v1::{
|
||||||
|
common::ZoneResourceSpec,
|
||||||
|
control::{control_service_client::ControlServiceClient, UpdateZoneResourcesRequest},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::cli::resolve_zone;
|
||||||
|
use krata::v1::control::GetZoneRequest;
|
||||||
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(about = "Update the available resources to a zone")]
|
||||||
|
pub struct ZoneUpdateResourcesCommand {
|
||||||
|
#[arg(help = "Zone to update resources of, either the name or the uuid")]
|
||||||
|
zone: String,
|
||||||
|
#[arg(
|
||||||
|
short = 'C',
|
||||||
|
long = "max-cpus",
|
||||||
|
default_value_t = 0,
|
||||||
|
help = "Maximum vCPUs available to the zone (0 means previous value)"
|
||||||
|
)]
|
||||||
|
max_cpus: u32,
|
||||||
|
#[arg(
|
||||||
|
short = 'c',
|
||||||
|
long = "target-cpus",
|
||||||
|
default_value_t = 0,
|
||||||
|
help = "Target vCPUs for the zone to use (0 means previous value)"
|
||||||
|
)]
|
||||||
|
target_cpus: u32,
|
||||||
|
#[arg(
|
||||||
|
short = 'M',
|
||||||
|
long = "max-memory",
|
||||||
|
default_value_t = 0,
|
||||||
|
help = "Maximum memory available to the zone, in megabytes (0 means previous value)"
|
||||||
|
)]
|
||||||
|
max_memory: u64,
|
||||||
|
#[arg(
|
||||||
|
short = 'm',
|
||||||
|
long = "target-memory",
|
||||||
|
default_value_t = 0,
|
||||||
|
help = "Target memory for the zone to use, in megabytes (0 means previous value)"
|
||||||
|
)]
|
||||||
|
target_memory: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ZoneUpdateResourcesCommand {
|
||||||
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
|
let zone_id = resolve_zone(&mut client, &self.zone).await?;
|
||||||
|
let zone = client
|
||||||
|
.get_zone(GetZoneRequest { zone_id })
|
||||||
|
.await?
|
||||||
|
.into_inner()
|
||||||
|
.zone
|
||||||
|
.unwrap_or_default();
|
||||||
|
let active_resources = zone
|
||||||
|
.status
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_default()
|
||||||
|
.resource_status
|
||||||
|
.unwrap_or_default()
|
||||||
|
.active_resources
|
||||||
|
.unwrap_or_default();
|
||||||
|
client
|
||||||
|
.update_zone_resources(Request::new(UpdateZoneResourcesRequest {
|
||||||
|
zone_id: zone.id.clone(),
|
||||||
|
resources: Some(ZoneResourceSpec {
|
||||||
|
max_memory: if self.max_memory == 0 {
|
||||||
|
active_resources.max_memory
|
||||||
|
} else {
|
||||||
|
self.max_memory
|
||||||
|
},
|
||||||
|
target_memory: if self.target_memory == 0 {
|
||||||
|
active_resources.target_memory
|
||||||
|
} else {
|
||||||
|
self.target_memory
|
||||||
|
},
|
||||||
|
max_cpus: if self.max_cpus == 0 {
|
||||||
|
active_resources.max_cpus
|
||||||
|
} else {
|
||||||
|
self.max_cpus
|
||||||
|
},
|
||||||
|
target_cpus: if self.target_cpus == 0 {
|
||||||
|
active_resources.target_cpus
|
||||||
|
} else {
|
||||||
|
self.target_cpus
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
}))
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
@ -1,22 +1,22 @@
|
|||||||
use anyhow::{anyhow, Result};
|
use anyhow::Result;
|
||||||
use async_stream::stream;
|
use async_stream::stream;
|
||||||
use crossterm::{
|
use crossterm::{
|
||||||
terminal::{disable_raw_mode, enable_raw_mode, is_raw_mode_enabled},
|
terminal::{disable_raw_mode, enable_raw_mode, is_raw_mode_enabled},
|
||||||
tty::IsTty,
|
tty::IsTty,
|
||||||
};
|
};
|
||||||
|
use krata::v1::common::ZoneState;
|
||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::common::TerminalSize,
|
||||||
common::ZoneStatus,
|
v1::control::{
|
||||||
control::{
|
watch_events_reply::Event, ExecInsideZoneReply, ExecInsideZoneRequest, ZoneConsoleReply,
|
||||||
watch_events_reply::Event, ExecZoneReply, ExecZoneRequest, ZoneConsoleReply,
|
ZoneConsoleRequest,
|
||||||
ZoneConsoleRequest,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
use log::debug;
|
use log::debug;
|
||||||
use tokio::{
|
use tokio::{
|
||||||
io::{stderr, stdin, stdout, AsyncReadExt, AsyncWriteExt},
|
io::{stderr, stdin, stdout, AsyncReadExt, AsyncWriteExt},
|
||||||
|
select,
|
||||||
task::JoinHandle,
|
task::JoinHandle,
|
||||||
};
|
};
|
||||||
use tokio_stream::{Stream, StreamExt};
|
use tokio_stream::{Stream, StreamExt};
|
||||||
@ -24,11 +24,19 @@ use tonic::Streaming;
|
|||||||
|
|
||||||
pub struct StdioConsoleStream;
|
pub struct StdioConsoleStream;
|
||||||
|
|
||||||
|
enum ExecStdinSelect {
|
||||||
|
DataRead(std::io::Result<usize>),
|
||||||
|
TerminalResize,
|
||||||
|
}
|
||||||
|
|
||||||
impl StdioConsoleStream {
|
impl StdioConsoleStream {
|
||||||
pub async fn stdin_stream(zone: String) -> impl Stream<Item = ZoneConsoleRequest> {
|
pub async fn stdin_stream(
|
||||||
|
zone: String,
|
||||||
|
replay_history: bool,
|
||||||
|
) -> impl Stream<Item = ZoneConsoleRequest> {
|
||||||
let mut stdin = stdin();
|
let mut stdin = stdin();
|
||||||
stream! {
|
stream! {
|
||||||
yield ZoneConsoleRequest { zone_id: zone, data: vec![] };
|
yield ZoneConsoleRequest { zone_id: zone, replay_history, data: vec![] };
|
||||||
|
|
||||||
let mut buffer = vec![0u8; 60];
|
let mut buffer = vec![0u8; 60];
|
||||||
loop {
|
loop {
|
||||||
@ -43,38 +51,118 @@ impl StdioConsoleStream {
|
|||||||
if size == 1 && buffer[0] == 0x1d {
|
if size == 1 && buffer[0] == 0x1d {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
yield ZoneConsoleRequest { zone_id: String::default(), data };
|
yield ZoneConsoleRequest { zone_id: String::default(), replay_history, data };
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn stdin_stream_exec(
|
#[cfg(unix)]
|
||||||
initial: ExecZoneRequest,
|
pub async fn input_stream_exec(
|
||||||
) -> impl Stream<Item = ExecZoneRequest> {
|
initial: ExecInsideZoneRequest,
|
||||||
|
tty: bool,
|
||||||
|
) -> impl Stream<Item = ExecInsideZoneRequest> {
|
||||||
let mut stdin = stdin();
|
let mut stdin = stdin();
|
||||||
stream! {
|
stream! {
|
||||||
yield initial;
|
yield initial;
|
||||||
|
|
||||||
let mut buffer = vec![0u8; 60];
|
let mut buffer = vec![0u8; 60];
|
||||||
|
let mut terminal_size_change = if tty {
|
||||||
|
tokio::signal::unix::signal(tokio::signal::unix::SignalKind::window_change()).ok()
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
let mut stdin_closed = false;
|
||||||
loop {
|
loop {
|
||||||
let size = match stdin.read(&mut buffer).await {
|
let selected = if let Some(ref mut terminal_size_change) = terminal_size_change {
|
||||||
Ok(size) => size,
|
if stdin_closed {
|
||||||
Err(error) => {
|
select! {
|
||||||
debug!("failed to read stdin: {}", error);
|
_ = terminal_size_change.recv() => ExecStdinSelect::TerminalResize,
|
||||||
break;
|
}
|
||||||
|
} else {
|
||||||
|
select! {
|
||||||
|
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
|
||||||
|
_ = terminal_size_change.recv() => ExecStdinSelect::TerminalResize,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
select! {
|
||||||
|
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let data = buffer[0..size].to_vec();
|
|
||||||
if size == 1 && buffer[0] == 0x1d {
|
match selected {
|
||||||
break;
|
ExecStdinSelect::DataRead(result) => {
|
||||||
|
match result {
|
||||||
|
Ok(size) => {
|
||||||
|
let stdin = buffer[0..size].to_vec();
|
||||||
|
if size == 1 && buffer[0] == 0x1d {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
stdin_closed = size == 0;
|
||||||
|
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: None, stdin, stdin_closed, };
|
||||||
|
},
|
||||||
|
Err(error) => {
|
||||||
|
debug!("failed to read stdin: {}", error);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ExecStdinSelect::TerminalResize => {
|
||||||
|
if let Ok((columns, rows)) = crossterm::terminal::size() {
|
||||||
|
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: Some(TerminalSize {
|
||||||
|
rows: rows as u32,
|
||||||
|
columns: columns as u32,
|
||||||
|
}), stdin: vec![], stdin_closed: false, };
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
yield ExecZoneRequest { zone_id: String::default(), task: None, data };
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn stdout(mut stream: Streaming<ZoneConsoleReply>) -> Result<()> {
|
#[cfg(not(unix))]
|
||||||
if stdin().is_tty() {
|
pub async fn input_stream_exec(
|
||||||
|
initial: ExecInsideZoneRequest,
|
||||||
|
_tty: bool,
|
||||||
|
) -> impl Stream<Item = ExecInsideZoneRequest> {
|
||||||
|
let mut stdin = stdin();
|
||||||
|
stream! {
|
||||||
|
yield initial;
|
||||||
|
|
||||||
|
let mut buffer = vec![0u8; 60];
|
||||||
|
let mut stdin_closed = false;
|
||||||
|
loop {
|
||||||
|
let selected = select! {
|
||||||
|
result = stdin.read(&mut buffer) => ExecStdinSelect::DataRead(result),
|
||||||
|
};
|
||||||
|
|
||||||
|
match selected {
|
||||||
|
ExecStdinSelect::DataRead(result) => {
|
||||||
|
match result {
|
||||||
|
Ok(size) => {
|
||||||
|
let stdin = buffer[0..size].to_vec();
|
||||||
|
if size == 1 && buffer[0] == 0x1d {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
stdin_closed = size == 0;
|
||||||
|
yield ExecInsideZoneRequest { zone_id: String::default(), task: None, terminal_size: None, stdin, stdin_closed, };
|
||||||
|
},
|
||||||
|
Err(error) => {
|
||||||
|
debug!("failed to read stdin: {}", error);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
_ => {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn stdout(mut stream: Streaming<ZoneConsoleReply>, raw: bool) -> Result<()> {
|
||||||
|
if raw && stdin().is_tty() {
|
||||||
enable_raw_mode()?;
|
enable_raw_mode()?;
|
||||||
StdioConsoleStream::register_terminal_restore_hook()?;
|
StdioConsoleStream::register_terminal_restore_hook()?;
|
||||||
}
|
}
|
||||||
@ -90,7 +178,11 @@ impl StdioConsoleStream {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn exec_output(mut stream: Streaming<ExecZoneReply>) -> Result<i32> {
|
pub async fn exec_output(mut stream: Streaming<ExecInsideZoneReply>, raw: bool) -> Result<i32> {
|
||||||
|
if raw {
|
||||||
|
enable_raw_mode()?;
|
||||||
|
StdioConsoleStream::register_terminal_restore_hook()?;
|
||||||
|
}
|
||||||
let mut stdout = stdout();
|
let mut stdout = stdout();
|
||||||
let mut stderr = stderr();
|
let mut stderr = stderr();
|
||||||
while let Some(reply) = stream.next().await {
|
while let Some(reply) = stream.next().await {
|
||||||
@ -109,7 +201,12 @@ impl StdioConsoleStream {
|
|||||||
return if reply.error.is_empty() {
|
return if reply.error.is_empty() {
|
||||||
Ok(reply.exit_code)
|
Ok(reply.exit_code)
|
||||||
} else {
|
} else {
|
||||||
Err(anyhow!("exec failed: {}", reply.error))
|
StdioConsoleStream::restore_terminal_mode();
|
||||||
|
stderr
|
||||||
|
.write_all(format!("Error: exec failed: {}\n", reply.error).as_bytes())
|
||||||
|
.await?;
|
||||||
|
stderr.flush().await?;
|
||||||
|
Ok(-1)
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -128,7 +225,7 @@ impl StdioConsoleStream {
|
|||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(state) = zone.state else {
|
let Some(status) = zone.status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -136,12 +233,12 @@ impl StdioConsoleStream {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(exit_info) = state.exit_info {
|
if let Some(exit_status) = status.exit_status {
|
||||||
return Some(exit_info.code);
|
return Some(exit_status.code);
|
||||||
}
|
}
|
||||||
|
|
||||||
let status = state.status();
|
let state = status.state();
|
||||||
if status == ZoneStatus::Destroying || status == ZoneStatus::Destroyed {
|
if state == ZoneState::Destroying || state == ZoneState::Destroyed {
|
||||||
return Some(10);
|
return Some(10);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3,11 +3,12 @@ use std::{collections::HashMap, time::Duration};
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use fancy_duration::FancyDuration;
|
use fancy_duration::FancyDuration;
|
||||||
use human_bytes::human_bytes;
|
use human_bytes::human_bytes;
|
||||||
use krata::v1::common::{Zone, ZoneMetricFormat, ZoneMetricNode, ZoneStatus};
|
|
||||||
use prost_reflect::{DynamicMessage, ReflectMessage};
|
use prost_reflect::{DynamicMessage, ReflectMessage};
|
||||||
use prost_types::Value;
|
use prost_types::Value;
|
||||||
use termtree::Tree;
|
use termtree::Tree;
|
||||||
|
|
||||||
|
use krata::v1::common::{Zone, ZoneMetricFormat, ZoneMetricNode, ZoneState};
|
||||||
|
|
||||||
pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
|
pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
|
||||||
Ok(DynamicMessage::decode(
|
Ok(DynamicMessage::decode(
|
||||||
proto.descriptor(),
|
proto.descriptor(),
|
||||||
@ -75,30 +76,30 @@ pub fn kv2line(map: HashMap<String, String>) -> String {
|
|||||||
.join(" ")
|
.join(" ")
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn zone_status_text(status: ZoneStatus) -> String {
|
pub fn zone_state_text(status: ZoneState) -> String {
|
||||||
match status {
|
match status {
|
||||||
ZoneStatus::Starting => "starting",
|
ZoneState::Creating => "creating",
|
||||||
ZoneStatus::Started => "started",
|
ZoneState::Created => "created",
|
||||||
ZoneStatus::Destroying => "destroying",
|
ZoneState::Destroying => "destroying",
|
||||||
ZoneStatus::Destroyed => "destroyed",
|
ZoneState::Destroyed => "destroyed",
|
||||||
ZoneStatus::Exited => "exited",
|
ZoneState::Exited => "exited",
|
||||||
ZoneStatus::Failed => "failed",
|
ZoneState::Failed => "failed",
|
||||||
_ => "unknown",
|
_ => "unknown",
|
||||||
}
|
}
|
||||||
.to_string()
|
.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn zone_simple_line(zone: &Zone) -> String {
|
pub fn zone_simple_line(zone: &Zone) -> String {
|
||||||
let state = zone_status_text(
|
let state = zone_state_text(
|
||||||
zone.state
|
zone.status
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|x| x.status())
|
.map(|x| x.state())
|
||||||
.unwrap_or(ZoneStatus::Unknown),
|
.unwrap_or(ZoneState::Unknown),
|
||||||
);
|
);
|
||||||
let name = zone.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
|
let name = zone.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
|
||||||
let network = zone.state.as_ref().and_then(|x| x.network.as_ref());
|
let network_status = zone.status.as_ref().and_then(|x| x.network_status.as_ref());
|
||||||
let ipv4 = network.map(|x| x.zone_ipv4.as_str()).unwrap_or("");
|
let ipv4 = network_status.map(|x| x.zone_ipv4.as_str()).unwrap_or("");
|
||||||
let ipv6 = network.map(|x| x.zone_ipv6.as_str()).unwrap_or("");
|
let ipv6 = network_status.map(|x| x.zone_ipv6.as_str()).unwrap_or("");
|
||||||
format!("{}\t{}\t{}\t{}\t{}", zone.id, state, name, ipv4, ipv6)
|
format!("{}\t{}\t{}\t{}\t{}", zone.id, state, name, ipv4, ipv6)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,8 +1,10 @@
|
|||||||
|
use crate::format::metrics_value_pretty;
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
use krata::v1::common::ZoneState;
|
||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{Zone, ZoneMetricNode, ZoneStatus},
|
common::{Zone, ZoneMetricNode},
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
ListZonesRequest, ReadZoneMetricsRequest,
|
ListZonesRequest, ReadZoneMetricsRequest,
|
||||||
@ -19,8 +21,6 @@ use tokio::{
|
|||||||
};
|
};
|
||||||
use tonic::transport::Channel;
|
use tonic::transport::Channel;
|
||||||
|
|
||||||
use crate::format::metrics_value_pretty;
|
|
||||||
|
|
||||||
pub struct MetricState {
|
pub struct MetricState {
|
||||||
pub zone: Zone,
|
pub zone: Zone,
|
||||||
pub root: Option<ZoneMetricNode>,
|
pub root: Option<ZoneMetricNode>,
|
||||||
@ -86,11 +86,11 @@ impl MultiMetricCollector {
|
|||||||
let Some(zone) = changed.zone else {
|
let Some(zone) = changed.zone else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
let Some(ref state) = zone.state else {
|
let Some(ref status) = zone.status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
zones.retain(|x| x.id != zone.id);
|
zones.retain(|x| x.id != zone.id);
|
||||||
if state.status() != ZoneStatus::Destroying {
|
if status.state() != ZoneState::Destroying {
|
||||||
zones.push(zone);
|
zones.push(zone);
|
||||||
}
|
}
|
||||||
false
|
false
|
||||||
@ -112,11 +112,11 @@ impl MultiMetricCollector {
|
|||||||
|
|
||||||
let mut metrics = Vec::new();
|
let mut metrics = Vec::new();
|
||||||
for zone in &zones {
|
for zone in &zones {
|
||||||
let Some(ref state) = zone.state else {
|
let Some(ref status) = zone.status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if state.status() != ZoneStatus::Started {
|
if status.state() != ZoneState::Created {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9,6 +9,7 @@ edition = "2021"
|
|||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
krata-advmac = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
async-stream = { workspace = true }
|
async-stream = { workspace = true }
|
||||||
async-trait = { workspace = true }
|
async-trait = { workspace = true }
|
||||||
@ -17,14 +18,16 @@ circular-buffer = { workspace = true }
|
|||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
futures = { workspace = true }
|
futures = { workspace = true }
|
||||||
krata = { path = "../krata", version = "^0.0.14" }
|
ipnetwork = { workspace = true }
|
||||||
krata-oci = { path = "../oci", version = "^0.0.14" }
|
krata = { path = "../krata", version = "^0.0.20" }
|
||||||
krata-runtime = { path = "../runtime", version = "^0.0.14" }
|
krata-oci = { path = "../oci", version = "^0.0.20" }
|
||||||
|
krata-runtime = { path = "../runtime", version = "^0.0.20" }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
prost = { workspace = true }
|
prost = { workspace = true }
|
||||||
redb = { workspace = true }
|
redb = { workspace = true }
|
||||||
scopeguard = { workspace = true }
|
scopeguard = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
signal-hook = { workspace = true }
|
signal-hook = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
tokio-stream = { workspace = true }
|
tokio-stream = { workspace = true }
|
||||||
|
@ -15,7 +15,7 @@ use kratad::command::DaemonCommand;
|
|||||||
async fn main() -> Result<()> {
|
async fn main() -> Result<()> {
|
||||||
let mut builder = env_logger::Builder::new();
|
let mut builder = env_logger::Builder::new();
|
||||||
builder
|
builder
|
||||||
.filter_level(LevelFilter::Trace)
|
.filter_level(LevelFilter::Info)
|
||||||
.parse_default_env()
|
.parse_default_env()
|
||||||
.filter(Some("backhand::filesystem::writer"), LevelFilter::Warn);
|
.filter(Some("backhand::filesystem::writer"), LevelFilter::Warn);
|
||||||
|
|
||||||
|
@ -10,6 +10,8 @@ pub struct DaemonConfig {
|
|||||||
pub oci: OciConfig,
|
pub oci: OciConfig,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub pci: DaemonPciConfig,
|
pub pci: DaemonPciConfig,
|
||||||
|
#[serde(default = "default_network")]
|
||||||
|
pub network: DaemonNetworkConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||||
@ -49,15 +51,74 @@ pub enum DaemonPciDeviceRdmReservePolicy {
|
|||||||
Relaxed,
|
Relaxed,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DaemonConfig {
|
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||||
pub async fn load(path: &Path) -> Result<DaemonConfig> {
|
pub struct DaemonNetworkConfig {
|
||||||
if path.exists() {
|
#[serde(default = "default_network_nameservers")]
|
||||||
let content = fs::read_to_string(path).await?;
|
pub nameservers: Vec<String>,
|
||||||
let config: DaemonConfig = toml::from_str(&content)?;
|
#[serde(default = "default_network_ipv4")]
|
||||||
Ok(config)
|
pub ipv4: DaemonIpv4NetworkConfig,
|
||||||
} else {
|
#[serde(default = "default_network_ipv6")]
|
||||||
fs::write(&path, "").await?;
|
pub ipv6: DaemonIpv6NetworkConfig,
|
||||||
Ok(DaemonConfig::default())
|
}
|
||||||
}
|
|
||||||
|
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||||
|
pub struct DaemonIpv4NetworkConfig {
|
||||||
|
#[serde(default = "default_network_ipv4_subnet")]
|
||||||
|
pub subnet: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||||
|
pub struct DaemonIpv6NetworkConfig {
|
||||||
|
#[serde(default = "default_network_ipv6_subnet")]
|
||||||
|
pub subnet: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_network() -> DaemonNetworkConfig {
|
||||||
|
DaemonNetworkConfig {
|
||||||
|
nameservers: default_network_nameservers(),
|
||||||
|
ipv4: default_network_ipv4(),
|
||||||
|
ipv6: default_network_ipv6(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_network_nameservers() -> Vec<String> {
|
||||||
|
vec![
|
||||||
|
"1.1.1.1".to_string(),
|
||||||
|
"1.0.0.1".to_string(),
|
||||||
|
"2606:4700:4700::1111".to_string(),
|
||||||
|
"2606:4700:4700::1001".to_string(),
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_network_ipv4() -> DaemonIpv4NetworkConfig {
|
||||||
|
DaemonIpv4NetworkConfig {
|
||||||
|
subnet: default_network_ipv4_subnet(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_network_ipv4_subnet() -> String {
|
||||||
|
"10.75.0.0/16".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_network_ipv6() -> DaemonIpv6NetworkConfig {
|
||||||
|
DaemonIpv6NetworkConfig {
|
||||||
|
subnet: default_network_ipv6_subnet(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_network_ipv6_subnet() -> String {
|
||||||
|
"fdd4:1476:6c7e::/48".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DaemonConfig {
|
||||||
|
pub async fn load(path: &Path) -> Result<DaemonConfig> {
|
||||||
|
if !path.exists() {
|
||||||
|
let config: DaemonConfig = toml::from_str("")?;
|
||||||
|
let content = toml::to_string_pretty(&config)?;
|
||||||
|
fs::write(&path, content).await?;
|
||||||
|
}
|
||||||
|
let content = fs::read_to_string(path).await?;
|
||||||
|
let config: DaemonConfig = toml::from_str(&content)?;
|
||||||
|
Ok(config)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -24,7 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DaemonConsoleHandle {
|
pub struct DaemonConsoleHandle {
|
||||||
glt: ZoneLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
listeners: ListenerMap,
|
listeners: ListenerMap,
|
||||||
buffers: BufferMap,
|
buffers: BufferMap,
|
||||||
sender: Sender<(u32, Vec<u8>)>,
|
sender: Sender<(u32, Vec<u8>)>,
|
||||||
@ -57,7 +57,7 @@ impl DaemonConsoleHandle {
|
|||||||
uuid: Uuid,
|
uuid: Uuid,
|
||||||
sender: Sender<Vec<u8>>,
|
sender: Sender<Vec<u8>>,
|
||||||
) -> Result<DaemonConsoleAttachHandle> {
|
) -> Result<DaemonConsoleAttachHandle> {
|
||||||
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
|
let Some(domid) = self.zlt.lookup_domid_by_uuid(&uuid).await else {
|
||||||
return Err(anyhow!("unable to find domain {}", uuid));
|
return Err(anyhow!("unable to find domain {}", uuid));
|
||||||
};
|
};
|
||||||
let buffers = self.buffers.lock().await;
|
let buffers = self.buffers.lock().await;
|
||||||
@ -84,7 +84,7 @@ impl Drop for DaemonConsoleHandle {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub struct DaemonConsole {
|
pub struct DaemonConsole {
|
||||||
glt: ZoneLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
listeners: ListenerMap,
|
listeners: ListenerMap,
|
||||||
buffers: BufferMap,
|
buffers: BufferMap,
|
||||||
receiver: Receiver<(u32, Option<Vec<u8>>)>,
|
receiver: Receiver<(u32, Option<Vec<u8>>)>,
|
||||||
@ -93,14 +93,14 @@ pub struct DaemonConsole {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DaemonConsole {
|
impl DaemonConsole {
|
||||||
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonConsole> {
|
pub async fn new(zlt: ZoneLookupTable) -> Result<DaemonConsole> {
|
||||||
let (service, sender, receiver) =
|
let (service, sender, receiver) =
|
||||||
ChannelService::new("krata-console".to_string(), Some(0)).await?;
|
ChannelService::new("krata-console".to_string(), Some(0)).await?;
|
||||||
let task = service.launch().await?;
|
let task = service.launch().await?;
|
||||||
let listeners = Arc::new(Mutex::new(HashMap::new()));
|
let listeners = Arc::new(Mutex::new(HashMap::new()));
|
||||||
let buffers = Arc::new(Mutex::new(HashMap::new()));
|
let buffers = Arc::new(Mutex::new(HashMap::new()));
|
||||||
Ok(DaemonConsole {
|
Ok(DaemonConsole {
|
||||||
glt,
|
zlt,
|
||||||
listeners,
|
listeners,
|
||||||
buffers,
|
buffers,
|
||||||
receiver,
|
receiver,
|
||||||
@ -110,7 +110,7 @@ impl DaemonConsole {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
|
pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
|
||||||
let glt = self.glt.clone();
|
let zlt = self.zlt.clone();
|
||||||
let listeners = self.listeners.clone();
|
let listeners = self.listeners.clone();
|
||||||
let buffers = self.buffers.clone();
|
let buffers = self.buffers.clone();
|
||||||
let sender = self.sender.clone();
|
let sender = self.sender.clone();
|
||||||
@ -120,7 +120,7 @@ impl DaemonConsole {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
Ok(DaemonConsoleHandle {
|
Ok(DaemonConsoleHandle {
|
||||||
glt,
|
zlt,
|
||||||
listeners,
|
listeners,
|
||||||
buffers,
|
buffers,
|
||||||
sender,
|
sender,
|
||||||
|
@ -1,611 +0,0 @@
|
|||||||
use async_stream::try_stream;
|
|
||||||
use futures::Stream;
|
|
||||||
use krata::{
|
|
||||||
idm::internal::{
|
|
||||||
exec_stream_request_update::Update, request::Request as IdmRequestType,
|
|
||||||
response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
|
|
||||||
ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
|
|
||||||
},
|
|
||||||
v1::{
|
|
||||||
common::{OciImageFormat, Zone, ZoneState, ZoneStatus},
|
|
||||||
control::{
|
|
||||||
control_service_server::ControlService, CreateZoneReply, CreateZoneRequest,
|
|
||||||
DestroyZoneReply, DestroyZoneRequest, DeviceInfo, ExecZoneReply, ExecZoneRequest,
|
|
||||||
HostCpuTopologyInfo, HostCpuTopologyReply, HostCpuTopologyRequest,
|
|
||||||
HostPowerManagementPolicy, IdentifyHostReply, IdentifyHostRequest, ListDevicesReply,
|
|
||||||
ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
|
|
||||||
ReadZoneMetricsReply, ReadZoneMetricsRequest, ResolveZoneReply, ResolveZoneRequest,
|
|
||||||
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest, ZoneConsoleReply,
|
|
||||||
ZoneConsoleRequest,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use krataoci::{
|
|
||||||
name::ImageName,
|
|
||||||
packer::{service::OciPackerService, OciPackedFormat, OciPackedImage},
|
|
||||||
progress::{OciProgress, OciProgressContext},
|
|
||||||
};
|
|
||||||
use kratart::Runtime;
|
|
||||||
use std::{pin::Pin, str::FromStr};
|
|
||||||
use tokio::{
|
|
||||||
select,
|
|
||||||
sync::mpsc::{channel, Sender},
|
|
||||||
task::JoinError,
|
|
||||||
};
|
|
||||||
use tokio_stream::StreamExt;
|
|
||||||
use tonic::{Request, Response, Status, Streaming};
|
|
||||||
use uuid::Uuid;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
command::DaemonCommand, console::DaemonConsoleHandle, db::ZoneStore,
|
|
||||||
devices::DaemonDeviceManager, event::DaemonEventContext, idm::DaemonIdmHandle,
|
|
||||||
metrics::idm_metric_to_api, oci::convert_oci_progress, zlt::ZoneLookupTable,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub struct ApiError {
|
|
||||||
message: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<anyhow::Error> for ApiError {
|
|
||||||
fn from(value: anyhow::Error) -> Self {
|
|
||||||
ApiError {
|
|
||||||
message: value.to_string(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<ApiError> for Status {
|
|
||||||
fn from(value: ApiError) -> Self {
|
|
||||||
Status::unknown(value.message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct DaemonControlService {
|
|
||||||
glt: ZoneLookupTable,
|
|
||||||
devices: DaemonDeviceManager,
|
|
||||||
events: DaemonEventContext,
|
|
||||||
console: DaemonConsoleHandle,
|
|
||||||
idm: DaemonIdmHandle,
|
|
||||||
zones: ZoneStore,
|
|
||||||
zone_reconciler_notify: Sender<Uuid>,
|
|
||||||
packer: OciPackerService,
|
|
||||||
runtime: Runtime,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DaemonControlService {
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
|
||||||
pub fn new(
|
|
||||||
glt: ZoneLookupTable,
|
|
||||||
devices: DaemonDeviceManager,
|
|
||||||
events: DaemonEventContext,
|
|
||||||
console: DaemonConsoleHandle,
|
|
||||||
idm: DaemonIdmHandle,
|
|
||||||
zones: ZoneStore,
|
|
||||||
zone_reconciler_notify: Sender<Uuid>,
|
|
||||||
packer: OciPackerService,
|
|
||||||
runtime: Runtime,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
glt,
|
|
||||||
devices,
|
|
||||||
events,
|
|
||||||
console,
|
|
||||||
idm,
|
|
||||||
zones,
|
|
||||||
zone_reconciler_notify,
|
|
||||||
packer,
|
|
||||||
runtime,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
enum ConsoleDataSelect {
|
|
||||||
Read(Option<Vec<u8>>),
|
|
||||||
Write(Option<Result<ZoneConsoleRequest, Status>>),
|
|
||||||
}
|
|
||||||
|
|
||||||
enum PullImageSelect {
|
|
||||||
Progress(Option<OciProgress>),
|
|
||||||
Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tonic::async_trait]
|
|
||||||
impl ControlService for DaemonControlService {
|
|
||||||
type ExecZoneStream =
|
|
||||||
Pin<Box<dyn Stream<Item = Result<ExecZoneReply, Status>> + Send + 'static>>;
|
|
||||||
|
|
||||||
type AttachZoneConsoleStream =
|
|
||||||
Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;
|
|
||||||
|
|
||||||
type PullImageStream =
|
|
||||||
Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
|
|
||||||
|
|
||||||
type WatchEventsStream =
|
|
||||||
Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>;
|
|
||||||
|
|
||||||
type SnoopIdmStream =
|
|
||||||
Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;
|
|
||||||
|
|
||||||
async fn identify_host(
|
|
||||||
&self,
|
|
||||||
request: Request<IdentifyHostRequest>,
|
|
||||||
) -> Result<Response<IdentifyHostReply>, Status> {
|
|
||||||
let _ = request.into_inner();
|
|
||||||
Ok(Response::new(IdentifyHostReply {
|
|
||||||
host_domid: self.glt.host_domid(),
|
|
||||||
host_uuid: self.glt.host_uuid().to_string(),
|
|
||||||
krata_version: DaemonCommand::version(),
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn create_zone(
|
|
||||||
&self,
|
|
||||||
request: Request<CreateZoneRequest>,
|
|
||||||
) -> Result<Response<CreateZoneReply>, Status> {
|
|
||||||
let request = request.into_inner();
|
|
||||||
let Some(spec) = request.spec else {
|
|
||||||
return Err(ApiError {
|
|
||||||
message: "zone spec not provided".to_string(),
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
};
|
|
||||||
let uuid = Uuid::new_v4();
|
|
||||||
self.zones
|
|
||||||
.update(
|
|
||||||
uuid,
|
|
||||||
Zone {
|
|
||||||
id: uuid.to_string(),
|
|
||||||
state: Some(ZoneState {
|
|
||||||
status: ZoneStatus::Starting.into(),
|
|
||||||
network: None,
|
|
||||||
exit_info: None,
|
|
||||||
error_info: None,
|
|
||||||
host: self.glt.host_uuid().to_string(),
|
|
||||||
domid: u32::MAX,
|
|
||||||
}),
|
|
||||||
spec: Some(spec),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map_err(ApiError::from)?;
|
|
||||||
self.zone_reconciler_notify
|
|
||||||
.send(uuid)
|
|
||||||
.await
|
|
||||||
.map_err(|x| ApiError {
|
|
||||||
message: x.to_string(),
|
|
||||||
})?;
|
|
||||||
Ok(Response::new(CreateZoneReply {
|
|
||||||
zone_id: uuid.to_string(),
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn exec_zone(
|
|
||||||
&self,
|
|
||||||
request: Request<Streaming<ExecZoneRequest>>,
|
|
||||||
) -> Result<Response<Self::ExecZoneStream>, Status> {
|
|
||||||
let mut input = request.into_inner();
|
|
||||||
let Some(request) = input.next().await else {
|
|
||||||
return Err(ApiError {
|
|
||||||
message: "expected to have at least one request".to_string(),
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
};
|
|
||||||
let request = request?;
|
|
||||||
|
|
||||||
let Some(task) = request.task else {
|
|
||||||
return Err(ApiError {
|
|
||||||
message: "task is missing".to_string(),
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
};
|
|
||||||
|
|
||||||
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let idm_request = IdmRequest {
|
|
||||||
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
|
|
||||||
update: Some(Update::Start(ExecStreamRequestStart {
|
|
||||||
environment: task
|
|
||||||
.environment
|
|
||||||
.into_iter()
|
|
||||||
.map(|x| ExecEnvVar {
|
|
||||||
key: x.key,
|
|
||||||
value: x.value,
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
command: task.command,
|
|
||||||
working_directory: task.working_directory,
|
|
||||||
})),
|
|
||||||
})),
|
|
||||||
};
|
|
||||||
|
|
||||||
let output = try_stream! {
|
|
||||||
let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
|
|
||||||
message: x.to_string(),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
loop {
|
|
||||||
select! {
|
|
||||||
x = input.next() => if let Some(update) = x {
|
|
||||||
let update: Result<ExecZoneRequest, Status> = update.map_err(|error| ApiError {
|
|
||||||
message: error.to_string()
|
|
||||||
}.into());
|
|
||||||
|
|
||||||
if let Ok(update) = update {
|
|
||||||
if !update.data.is_empty() {
|
|
||||||
let _ = handle.update(IdmRequest {
|
|
||||||
request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
|
|
||||||
update: Some(Update::Stdin(ExecStreamRequestStdin {
|
|
||||||
data: update.data,
|
|
||||||
})),
|
|
||||||
}))}).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
x = handle.receiver.recv() => match x {
|
|
||||||
Some(response) => {
|
|
||||||
let Some(IdmResponseType::ExecStream(update)) = response.response else {
|
|
||||||
break;
|
|
||||||
};
|
|
||||||
let reply = ExecZoneReply {
|
|
||||||
exited: update.exited,
|
|
||||||
error: update.error,
|
|
||||||
exit_code: update.exit_code,
|
|
||||||
stdout: update.stdout,
|
|
||||||
stderr: update.stderr
|
|
||||||
};
|
|
||||||
yield reply;
|
|
||||||
},
|
|
||||||
None => {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Response::new(Box::pin(output) as Self::ExecZoneStream))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn destroy_zone(
|
|
||||||
&self,
|
|
||||||
request: Request<DestroyZoneRequest>,
|
|
||||||
) -> Result<Response<DestroyZoneReply>, Status> {
|
|
||||||
let request = request.into_inner();
|
|
||||||
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
let Some(mut zone) = self.zones.read(uuid).await.map_err(ApiError::from)? else {
|
|
||||||
return Err(ApiError {
|
|
||||||
message: "zone not found".to_string(),
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
};
|
|
||||||
|
|
||||||
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
|
|
||||||
|
|
||||||
if zone.state.as_ref().unwrap().status() == ZoneStatus::Destroyed {
|
|
||||||
return Err(ApiError {
|
|
||||||
message: "zone already destroyed".to_string(),
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
zone.state.as_mut().unwrap().status = ZoneStatus::Destroying.into();
|
|
||||||
self.zones
|
|
||||||
.update(uuid, zone)
|
|
||||||
.await
|
|
||||||
.map_err(ApiError::from)?;
|
|
||||||
self.zone_reconciler_notify
|
|
||||||
.send(uuid)
|
|
||||||
.await
|
|
||||||
.map_err(|x| ApiError {
|
|
||||||
message: x.to_string(),
|
|
||||||
})?;
|
|
||||||
Ok(Response::new(DestroyZoneReply {}))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn list_zones(
|
|
||||||
&self,
|
|
||||||
request: Request<ListZonesRequest>,
|
|
||||||
) -> Result<Response<ListZonesReply>, Status> {
|
|
||||||
let _ = request.into_inner();
|
|
||||||
let zones = self.zones.list().await.map_err(ApiError::from)?;
|
|
||||||
let zones = zones.into_values().collect::<Vec<Zone>>();
|
|
||||||
Ok(Response::new(ListZonesReply { zones }))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn resolve_zone(
|
|
||||||
&self,
|
|
||||||
request: Request<ResolveZoneRequest>,
|
|
||||||
) -> Result<Response<ResolveZoneReply>, Status> {
|
|
||||||
let request = request.into_inner();
|
|
||||||
let zones = self.zones.list().await.map_err(ApiError::from)?;
|
|
||||||
let zones = zones
|
|
||||||
.into_values()
|
|
||||||
.filter(|x| {
|
|
||||||
let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
|
|
||||||
(!request.name.is_empty() && comparison_spec.name == request.name)
|
|
||||||
|| x.id == request.name
|
|
||||||
})
|
|
||||||
.collect::<Vec<Zone>>();
|
|
||||||
Ok(Response::new(ResolveZoneReply {
|
|
||||||
zone: zones.first().cloned(),
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn attach_zone_console(
|
|
||||||
&self,
|
|
||||||
request: Request<Streaming<ZoneConsoleRequest>>,
|
|
||||||
) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
|
|
||||||
let mut input = request.into_inner();
|
|
||||||
let Some(request) = input.next().await else {
|
|
||||||
return Err(ApiError {
|
|
||||||
message: "expected to have at least one request".to_string(),
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
};
|
|
||||||
let request = request?;
|
|
||||||
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
let (sender, mut receiver) = channel(100);
|
|
||||||
let console = self
|
|
||||||
.console
|
|
||||||
.attach(uuid, sender)
|
|
||||||
.await
|
|
||||||
.map_err(|error| ApiError {
|
|
||||||
message: format!("failed to attach to console: {}", error),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let output = try_stream! {
|
|
||||||
yield ZoneConsoleReply { data: console.initial.clone(), };
|
|
||||||
loop {
|
|
||||||
let what = select! {
|
|
||||||
x = receiver.recv() => ConsoleDataSelect::Read(x),
|
|
||||||
x = input.next() => ConsoleDataSelect::Write(x),
|
|
||||||
};
|
|
||||||
|
|
||||||
match what {
|
|
||||||
ConsoleDataSelect::Read(Some(data)) => {
|
|
||||||
yield ZoneConsoleReply { data, };
|
|
||||||
},
|
|
||||||
|
|
||||||
ConsoleDataSelect::Read(None) => {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
ConsoleDataSelect::Write(Some(request)) => {
|
|
||||||
let request = request?;
|
|
||||||
if !request.data.is_empty() {
|
|
||||||
console.send(request.data).await.map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
|
|
||||||
ConsoleDataSelect::Write(None) => {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Response::new(
|
|
||||||
Box::pin(output) as Self::AttachZoneConsoleStream
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn read_zone_metrics(
|
|
||||||
&self,
|
|
||||||
request: Request<ReadZoneMetricsRequest>,
|
|
||||||
) -> Result<Response<ReadZoneMetricsReply>, Status> {
|
|
||||||
let request = request.into_inner();
|
|
||||||
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
let client = self.idm.client(uuid).await.map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let response = client
|
|
||||||
.send(IdmRequest {
|
|
||||||
request: Some(IdmRequestType::Metrics(MetricsRequest {})),
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let mut reply = ReadZoneMetricsReply::default();
|
|
||||||
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
|
|
||||||
reply.root = metrics.root.map(idm_metric_to_api);
|
|
||||||
}
|
|
||||||
Ok(Response::new(reply))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn pull_image(
|
|
||||||
&self,
|
|
||||||
request: Request<PullImageRequest>,
|
|
||||||
) -> Result<Response<Self::PullImageStream>, Status> {
|
|
||||||
let request = request.into_inner();
|
|
||||||
let name = ImageName::parse(&request.image).map_err(|err| ApiError {
|
|
||||||
message: err.to_string(),
|
|
||||||
})?;
|
|
||||||
let format = match request.format() {
|
|
||||||
OciImageFormat::Unknown => OciPackedFormat::Squashfs,
|
|
||||||
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
|
|
||||||
OciImageFormat::Erofs => OciPackedFormat::Erofs,
|
|
||||||
OciImageFormat::Tar => OciPackedFormat::Tar,
|
|
||||||
};
|
|
||||||
let (context, mut receiver) = OciProgressContext::create();
|
|
||||||
let our_packer = self.packer.clone();
|
|
||||||
|
|
||||||
let output = try_stream! {
|
|
||||||
let mut task = tokio::task::spawn(async move {
|
|
||||||
our_packer.request(name, format, request.overwrite_cache, request.update, context).await
|
|
||||||
});
|
|
||||||
let abort_handle = task.abort_handle();
|
|
||||||
let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
|
|
||||||
handle.abort();
|
|
||||||
});
|
|
||||||
|
|
||||||
loop {
|
|
||||||
let what = select! {
|
|
||||||
x = receiver.changed() => match x {
|
|
||||||
Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
|
|
||||||
Err(_) => PullImageSelect::Progress(None),
|
|
||||||
},
|
|
||||||
x = &mut task => PullImageSelect::Completed(x),
|
|
||||||
};
|
|
||||||
match what {
|
|
||||||
PullImageSelect::Progress(Some(progress)) => {
|
|
||||||
let reply = PullImageReply {
|
|
||||||
progress: Some(convert_oci_progress(progress)),
|
|
||||||
digest: String::new(),
|
|
||||||
format: OciImageFormat::Unknown.into(),
|
|
||||||
};
|
|
||||||
yield reply;
|
|
||||||
},
|
|
||||||
|
|
||||||
PullImageSelect::Completed(result) => {
|
|
||||||
let result = result.map_err(|err| ApiError {
|
|
||||||
message: err.to_string(),
|
|
||||||
})?;
|
|
||||||
let packed = result.map_err(|err| ApiError {
|
|
||||||
message: err.to_string(),
|
|
||||||
})?;
|
|
||||||
let reply = PullImageReply {
|
|
||||||
progress: None,
|
|
||||||
digest: packed.digest,
|
|
||||||
format: match packed.format {
|
|
||||||
OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
|
|
||||||
OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
|
|
||||||
OciPackedFormat::Tar => OciImageFormat::Tar.into(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
yield reply;
|
|
||||||
break;
|
|
||||||
},
|
|
||||||
|
|
||||||
_ => {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
Ok(Response::new(Box::pin(output) as Self::PullImageStream))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn watch_events(
|
|
||||||
&self,
|
|
||||||
request: Request<WatchEventsRequest>,
|
|
||||||
) -> Result<Response<Self::WatchEventsStream>, Status> {
|
|
||||||
let _ = request.into_inner();
|
|
||||||
let mut events = self.events.subscribe();
|
|
||||||
let output = try_stream! {
|
|
||||||
while let Ok(event) = events.recv().await {
|
|
||||||
yield WatchEventsReply { event: Some(event), };
|
|
||||||
}
|
|
||||||
};
|
|
||||||
Ok(Response::new(Box::pin(output) as Self::WatchEventsStream))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn snoop_idm(
|
|
||||||
&self,
|
|
||||||
request: Request<SnoopIdmRequest>,
|
|
||||||
) -> Result<Response<Self::SnoopIdmStream>, Status> {
|
|
||||||
let _ = request.into_inner();
|
|
||||||
let mut messages = self.idm.snoop();
|
|
||||||
let glt = self.glt.clone();
|
|
||||||
let output = try_stream! {
|
|
||||||
while let Ok(event) = messages.recv().await {
|
|
||||||
let Some(from_uuid) = glt.lookup_uuid_by_domid(event.from).await else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let Some(to_uuid) = glt.lookup_uuid_by_domid(event.to).await else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
yield SnoopIdmReply { from: from_uuid.to_string(), to: to_uuid.to_string(), packet: Some(event.packet) };
|
|
||||||
}
|
|
||||||
};
|
|
||||||
Ok(Response::new(Box::pin(output) as Self::SnoopIdmStream))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn list_devices(
|
|
||||||
&self,
|
|
||||||
request: Request<ListDevicesRequest>,
|
|
||||||
) -> Result<Response<ListDevicesReply>, Status> {
|
|
||||||
let _ = request.into_inner();
|
|
||||||
let mut devices = Vec::new();
|
|
||||||
let state = self.devices.copy().await.map_err(|error| ApiError {
|
|
||||||
message: error.to_string(),
|
|
||||||
})?;
|
|
||||||
for (name, state) in state {
|
|
||||||
devices.push(DeviceInfo {
|
|
||||||
name,
|
|
||||||
claimed: state.owner.is_some(),
|
|
||||||
owner: state.owner.map(|x| x.to_string()).unwrap_or_default(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Ok(Response::new(ListDevicesReply { devices }))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_host_cpu_topology(
|
|
||||||
&self,
|
|
||||||
request: Request<HostCpuTopologyRequest>,
|
|
||||||
) -> Result<Response<HostCpuTopologyReply>, Status> {
|
|
||||||
let _ = request.into_inner();
|
|
||||||
let power = self
|
|
||||||
.runtime
|
|
||||||
.power_management_context()
|
|
||||||
.await
|
|
||||||
.map_err(ApiError::from)?;
|
|
||||||
let cputopo = power.cpu_topology().await.map_err(ApiError::from)?;
|
|
||||||
let mut cpus = vec![];
|
|
||||||
|
|
||||||
for cpu in cputopo {
|
|
||||||
cpus.push(HostCpuTopologyInfo {
|
|
||||||
core: cpu.core,
|
|
||||||
socket: cpu.socket,
|
|
||||||
node: cpu.node,
|
|
||||||
thread: cpu.thread,
|
|
||||||
class: cpu.class as i32,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Response::new(HostCpuTopologyReply { cpus }))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn set_host_power_management_policy(
|
|
||||||
&self,
|
|
||||||
request: Request<HostPowerManagementPolicy>,
|
|
||||||
) -> Result<Response<HostPowerManagementPolicy>, Status> {
|
|
||||||
let policy = request.into_inner();
|
|
||||||
let power = self
|
|
||||||
.runtime
|
|
||||||
.power_management_context()
|
|
||||||
.await
|
|
||||||
.map_err(ApiError::from)?;
|
|
||||||
let scheduler = &policy.scheduler;
|
|
||||||
|
|
||||||
power
|
|
||||||
.set_smt_policy(policy.smt_awareness)
|
|
||||||
.await
|
|
||||||
.map_err(ApiError::from)?;
|
|
||||||
power
|
|
||||||
.set_scheduler_policy(scheduler)
|
|
||||||
.await
|
|
||||||
.map_err(ApiError::from)?;
|
|
||||||
|
|
||||||
Ok(Response::new(HostPowerManagementPolicy {
|
|
||||||
scheduler: scheduler.to_string(),
|
|
||||||
smt_awareness: policy.smt_awareness,
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
}
|
|
84
crates/daemon/src/control/attach_zone_console.rs
Normal file
84
crates/daemon/src/control/attach_zone_console.rs
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
use std::pin::Pin;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use async_stream::try_stream;
|
||||||
|
use tokio::select;
|
||||||
|
use tokio::sync::mpsc::channel;
|
||||||
|
use tokio_stream::{Stream, StreamExt};
|
||||||
|
use tonic::{Status, Streaming};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use krata::v1::control::{ZoneConsoleReply, ZoneConsoleRequest};
|
||||||
|
|
||||||
|
use crate::console::DaemonConsoleHandle;
|
||||||
|
use crate::control::ApiError;
|
||||||
|
|
||||||
|
enum ConsoleDataSelect {
|
||||||
|
Read(Option<Vec<u8>>),
|
||||||
|
Write(Option<Result<ZoneConsoleRequest, Status>>),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct AttachZoneConsoleRpc {
|
||||||
|
console: DaemonConsoleHandle,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AttachZoneConsoleRpc {
|
||||||
|
pub fn new(console: DaemonConsoleHandle) -> Self {
|
||||||
|
Self { console }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
mut input: Streaming<ZoneConsoleRequest>,
|
||||||
|
) -> Result<Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>>
|
||||||
|
{
|
||||||
|
let Some(request) = input.next().await else {
|
||||||
|
return Err(anyhow!("expected to have at least one request"));
|
||||||
|
};
|
||||||
|
let request = request?;
|
||||||
|
let uuid = Uuid::from_str(&request.zone_id)?;
|
||||||
|
let (sender, mut receiver) = channel(100);
|
||||||
|
let console = self
|
||||||
|
.console
|
||||||
|
.attach(uuid, sender)
|
||||||
|
.await
|
||||||
|
.map_err(|error| anyhow!("failed to attach to console: {}", error))?;
|
||||||
|
|
||||||
|
let output = try_stream! {
|
||||||
|
if request.replay_history {
|
||||||
|
yield ZoneConsoleReply { data: console.initial.clone(), };
|
||||||
|
}
|
||||||
|
loop {
|
||||||
|
let what = select! {
|
||||||
|
x = receiver.recv() => ConsoleDataSelect::Read(x),
|
||||||
|
x = input.next() => ConsoleDataSelect::Write(x),
|
||||||
|
};
|
||||||
|
|
||||||
|
match what {
|
||||||
|
ConsoleDataSelect::Read(Some(data)) => {
|
||||||
|
yield ZoneConsoleReply { data, };
|
||||||
|
},
|
||||||
|
|
||||||
|
ConsoleDataSelect::Read(None) => {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
ConsoleDataSelect::Write(Some(request)) => {
|
||||||
|
let request = request?;
|
||||||
|
if !request.data.is_empty() {
|
||||||
|
console.send(request.data).await.map_err(|error| ApiError {
|
||||||
|
message: error.to_string(),
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
ConsoleDataSelect::Write(None) => {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(Box::pin(output))
|
||||||
|
}
|
||||||
|
}
|
56
crates/daemon/src/control/create_zone.rs
Normal file
56
crates/daemon/src/control/create_zone.rs
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
use crate::zlt::ZoneLookupTable;
|
||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use krata::v1::common::{Zone, ZoneState, ZoneStatus};
|
||||||
|
use krata::v1::control::{CreateZoneReply, CreateZoneRequest};
|
||||||
|
use tokio::sync::mpsc::Sender;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
pub struct CreateZoneRpc {
|
||||||
|
zones: ZoneStore,
|
||||||
|
zlt: ZoneLookupTable,
|
||||||
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CreateZoneRpc {
|
||||||
|
pub fn new(
|
||||||
|
zones: ZoneStore,
|
||||||
|
zlt: ZoneLookupTable,
|
||||||
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
zones,
|
||||||
|
zlt,
|
||||||
|
zone_reconciler_notify,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, request: CreateZoneRequest) -> Result<CreateZoneReply> {
|
||||||
|
let Some(spec) = request.spec else {
|
||||||
|
return Err(anyhow!("zone spec not provided"));
|
||||||
|
};
|
||||||
|
let uuid = Uuid::new_v4();
|
||||||
|
self.zones
|
||||||
|
.update(
|
||||||
|
uuid,
|
||||||
|
Zone {
|
||||||
|
id: uuid.to_string(),
|
||||||
|
status: Some(ZoneStatus {
|
||||||
|
state: ZoneState::Creating.into(),
|
||||||
|
network_status: None,
|
||||||
|
exit_status: None,
|
||||||
|
error_status: None,
|
||||||
|
resource_status: None,
|
||||||
|
host: self.zlt.host_uuid().to_string(),
|
||||||
|
domid: u32::MAX,
|
||||||
|
}),
|
||||||
|
spec: Some(spec),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
self.zone_reconciler_notify.send(uuid).await?;
|
||||||
|
Ok(CreateZoneReply {
|
||||||
|
zone_id: uuid.to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
42
crates/daemon/src/control/destroy_zone.rs
Normal file
42
crates/daemon/src/control/destroy_zone.rs
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use tokio::sync::mpsc::Sender;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use krata::v1::common::ZoneState;
|
||||||
|
use krata::v1::control::{DestroyZoneReply, DestroyZoneRequest};
|
||||||
|
|
||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
|
||||||
|
pub struct DestroyZoneRpc {
|
||||||
|
zones: ZoneStore,
|
||||||
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DestroyZoneRpc {
|
||||||
|
pub fn new(zones: ZoneStore, zone_reconciler_notify: Sender<Uuid>) -> Self {
|
||||||
|
Self {
|
||||||
|
zones,
|
||||||
|
zone_reconciler_notify,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, request: DestroyZoneRequest) -> Result<DestroyZoneReply> {
|
||||||
|
let uuid = Uuid::from_str(&request.zone_id)?;
|
||||||
|
let Some(mut zone) = self.zones.read(uuid).await? else {
|
||||||
|
return Err(anyhow!("zone not found"));
|
||||||
|
};
|
||||||
|
|
||||||
|
zone.status = Some(zone.status.as_mut().cloned().unwrap_or_default());
|
||||||
|
|
||||||
|
if zone.status.as_ref().unwrap().state() == ZoneState::Destroyed {
|
||||||
|
return Err(anyhow!("zone already destroyed"));
|
||||||
|
}
|
||||||
|
|
||||||
|
zone.status.as_mut().unwrap().state = ZoneState::Destroying.into();
|
||||||
|
self.zones.update(uuid, zone).await?;
|
||||||
|
self.zone_reconciler_notify.send(uuid).await?;
|
||||||
|
Ok(DestroyZoneReply {})
|
||||||
|
}
|
||||||
|
}
|
133
crates/daemon/src/control/exec_inside_zone.rs
Normal file
133
crates/daemon/src/control/exec_inside_zone.rs
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
use std::pin::Pin;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use async_stream::try_stream;
|
||||||
|
use tokio::select;
|
||||||
|
use tokio_stream::{Stream, StreamExt};
|
||||||
|
use tonic::{Status, Streaming};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use krata::idm::internal::Request;
|
||||||
|
use krata::{
|
||||||
|
idm::internal::{
|
||||||
|
exec_stream_request_update::Update, request::Request as IdmRequestType,
|
||||||
|
response::Response as IdmResponseType, ExecEnvVar, ExecStreamRequestStart,
|
||||||
|
ExecStreamRequestStdin, ExecStreamRequestTerminalSize, ExecStreamRequestUpdate,
|
||||||
|
Request as IdmRequest,
|
||||||
|
},
|
||||||
|
v1::control::{ExecInsideZoneReply, ExecInsideZoneRequest},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::control::ApiError;
|
||||||
|
use crate::idm::DaemonIdmHandle;
|
||||||
|
|
||||||
|
/// RPC handler that bridges an `ExecInsideZone` gRPC stream to a zone's IDM
/// exec-stream protocol.
pub struct ExecInsideZoneRpc {
    idm: DaemonIdmHandle,
}

impl ExecInsideZoneRpc {
    /// Construct a handler backed by the daemon's IDM handle.
    pub fn new(idm: DaemonIdmHandle) -> Self {
        Self { idm }
    }

    /// Drive an exec session inside a zone.
    ///
    /// The first client message must carry the task to run (command,
    /// environment, working directory, tty flag); later client messages feed
    /// stdin and terminal resizes. IDM exec-stream responses are translated
    /// into `ExecInsideZoneReply` items on the returned stream until the IDM
    /// side stops producing exec-stream updates.
    pub async fn process(
        self,
        mut input: Streaming<ExecInsideZoneRequest>,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<ExecInsideZoneReply, Status>> + Send + 'static>>>
    {
        let Some(request) = input.next().await else {
            return Err(anyhow!("expected to have at least one request"));
        };
        let request = request?;

        let Some(task) = request.task else {
            return Err(anyhow!("task is missing"));
        };

        let uuid = Uuid::from_str(&request.zone_id)?;
        let idm = self.idm.client(uuid).await?;

        // Initial IDM message: start the exec with the task parameters and,
        // when provided, the initial terminal size.
        let idm_request = Request {
            request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                update: Some(Update::Start(ExecStreamRequestStart {
                    environment: task
                        .environment
                        .into_iter()
                        .map(|x| ExecEnvVar {
                            key: x.key,
                            value: x.value,
                        })
                        .collect(),
                    command: task.command,
                    working_directory: task.working_directory,
                    tty: task.tty,
                    terminal_size: request.terminal_size.map(|size| {
                        ExecStreamRequestTerminalSize {
                            rows: size.rows,
                            columns: size.columns,
                        }
                    }),
                })),
            })),
        };

        let output = try_stream! {
            let mut handle = idm.send_stream(idm_request).await.map_err(|x| ApiError {
                message: x.to_string(),
            })?;

            loop {
                select! {
                    // Client -> zone direction: forward stdin bytes and
                    // terminal resizes to the IDM exec stream.
                    x = input.next() => if let Some(update) = x {
                        let update: Result<ExecInsideZoneRequest, Status> = update.map_err(|error| ApiError {
                            message: error.to_string()
                        }.into());

                        if let Ok(update) = update {
                            if !update.stdin.is_empty() {
                                // Send errors are deliberately ignored here:
                                // the exec outcome is reported through the
                                // response stream instead.
                                let _ = handle.update(IdmRequest {
                                    request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                                        update: Some(Update::Stdin(ExecStreamRequestStdin {
                                            data: update.stdin,
                                            closed: update.stdin_closed,
                                        })),
                                    }))}).await;
                            }

                            if let Some(ref terminal_size) = update.terminal_size {
                                let _ = handle.update(IdmRequest {
                                    request: Some(IdmRequestType::ExecStream(ExecStreamRequestUpdate {
                                        update: Some(Update::TerminalResize(ExecStreamRequestTerminalSize {
                                            rows: terminal_size.rows,
                                            columns: terminal_size.columns,
                                        })),
                                    }))}).await;
                            }
                        }
                    },
                    // Zone -> client direction: translate IDM exec-stream
                    // updates into replies; any non-exec-stream response (or
                    // a closed channel) terminates the session.
                    x = handle.receiver.recv() => match x {
                        Some(response) => {
                            let Some(IdmResponseType::ExecStream(update)) = response.response else {
                                break;
                            };
                            let reply = ExecInsideZoneReply {
                                exited: update.exited,
                                error: update.error,
                                exit_code: update.exit_code,
                                stdout: update.stdout,
                                stderr: update.stderr,
                            };
                            yield reply;
                        },
                        None => {
                            break;
                        }
                    }
                }
            }
        };

        Ok(Box::pin(output))
    }
}
|
33
crates/daemon/src/control/get_host_cpu_topology.rs
Normal file
33
crates/daemon/src/control/get_host_cpu_topology.rs
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use krata::v1::control::{GetHostCpuTopologyReply, GetHostCpuTopologyRequest, HostCpuTopologyInfo};
|
||||||
|
use kratart::Runtime;
|
||||||
|
|
||||||
|
pub struct GetHostCpuTopologyRpc {
|
||||||
|
runtime: Runtime,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GetHostCpuTopologyRpc {
|
||||||
|
pub fn new(runtime: Runtime) -> Self {
|
||||||
|
Self { runtime }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
_request: GetHostCpuTopologyRequest,
|
||||||
|
) -> Result<GetHostCpuTopologyReply> {
|
||||||
|
let power = self.runtime.power_management_context().await?;
|
||||||
|
let cpu_topology = power.cpu_topology().await?;
|
||||||
|
let mut cpus = vec![];
|
||||||
|
|
||||||
|
for cpu in cpu_topology {
|
||||||
|
cpus.push(HostCpuTopologyInfo {
|
||||||
|
core: cpu.core,
|
||||||
|
socket: cpu.socket,
|
||||||
|
node: cpu.node,
|
||||||
|
thread: cpu.thread,
|
||||||
|
class: cpu.class as i32,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
Ok(GetHostCpuTopologyReply { cpus })
|
||||||
|
}
|
||||||
|
}
|
37
crates/daemon/src/control/get_host_status.rs
Normal file
37
crates/daemon/src/control/get_host_status.rs
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
use crate::command::DaemonCommand;
|
||||||
|
use crate::network::assignment::NetworkAssignment;
|
||||||
|
use crate::zlt::ZoneLookupTable;
|
||||||
|
use anyhow::Result;
|
||||||
|
use krata::v1::control::{GetHostStatusReply, GetHostStatusRequest};
|
||||||
|
|
||||||
|
pub struct GetHostStatusRpc {
|
||||||
|
network: NetworkAssignment,
|
||||||
|
zlt: ZoneLookupTable,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GetHostStatusRpc {
|
||||||
|
pub fn new(ip: NetworkAssignment, zlt: ZoneLookupTable) -> Self {
|
||||||
|
Self { network: ip, zlt }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, _request: GetHostStatusRequest) -> Result<GetHostStatusReply> {
|
||||||
|
let host_reservation = self.network.retrieve(self.zlt.host_uuid()).await?;
|
||||||
|
Ok(GetHostStatusReply {
|
||||||
|
host_domid: self.zlt.host_domid(),
|
||||||
|
host_uuid: self.zlt.host_uuid().to_string(),
|
||||||
|
krata_version: DaemonCommand::version(),
|
||||||
|
host_ipv4: host_reservation
|
||||||
|
.as_ref()
|
||||||
|
.map(|x| format!("{}/{}", x.ipv4, x.ipv4_prefix))
|
||||||
|
.unwrap_or_default(),
|
||||||
|
host_ipv6: host_reservation
|
||||||
|
.as_ref()
|
||||||
|
.map(|x| format!("{}/{}", x.ipv6, x.ipv6_prefix))
|
||||||
|
.unwrap_or_default(),
|
||||||
|
host_mac: host_reservation
|
||||||
|
.as_ref()
|
||||||
|
.map(|x| x.mac.to_string().to_lowercase().replace('-', ":"))
|
||||||
|
.unwrap_or_default(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
24
crates/daemon/src/control/get_zone.rs
Normal file
24
crates/daemon/src/control/get_zone.rs
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::Result;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use krata::v1::control::{GetZoneReply, GetZoneRequest};
|
||||||
|
|
||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
|
||||||
|
pub struct GetZoneRpc {
|
||||||
|
zones: ZoneStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GetZoneRpc {
|
||||||
|
pub fn new(zones: ZoneStore) -> Self {
|
||||||
|
Self { zones }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, request: GetZoneRequest) -> Result<GetZoneReply> {
|
||||||
|
let mut zones = self.zones.list().await?;
|
||||||
|
let zone = zones.remove(&Uuid::from_str(&request.zone_id)?);
|
||||||
|
Ok(GetZoneReply { zone })
|
||||||
|
}
|
||||||
|
}
|
28
crates/daemon/src/control/list_devices.rs
Normal file
28
crates/daemon/src/control/list_devices.rs
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
|
||||||
|
use krata::v1::control::{DeviceInfo, ListDevicesReply, ListDevicesRequest};
|
||||||
|
|
||||||
|
use crate::devices::DaemonDeviceManager;
|
||||||
|
|
||||||
|
pub struct ListDevicesRpc {
|
||||||
|
devices: DaemonDeviceManager,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ListDevicesRpc {
|
||||||
|
pub fn new(devices: DaemonDeviceManager) -> Self {
|
||||||
|
Self { devices }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, _request: ListDevicesRequest) -> Result<ListDevicesReply> {
|
||||||
|
let mut devices = Vec::new();
|
||||||
|
let state = self.devices.copy().await?;
|
||||||
|
for (name, state) in state {
|
||||||
|
devices.push(DeviceInfo {
|
||||||
|
name,
|
||||||
|
claimed: state.owner.is_some(),
|
||||||
|
owner: state.owner.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Ok(ListDevicesReply { devices })
|
||||||
|
}
|
||||||
|
}
|
28
crates/daemon/src/control/list_network_reservations.rs
Normal file
28
crates/daemon/src/control/list_network_reservations.rs
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
|
||||||
|
use krata::v1::{
|
||||||
|
common::NetworkReservation,
|
||||||
|
control::{ListNetworkReservationsReply, ListNetworkReservationsRequest},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::network::assignment::NetworkAssignment;
|
||||||
|
|
||||||
|
pub struct ListNetworkReservationsRpc {
|
||||||
|
network: NetworkAssignment,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ListNetworkReservationsRpc {
|
||||||
|
pub fn new(network: NetworkAssignment) -> Self {
|
||||||
|
Self { network }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
_request: ListNetworkReservationsRequest,
|
||||||
|
) -> Result<ListNetworkReservationsReply> {
|
||||||
|
let state = self.network.read_reservations().await?;
|
||||||
|
let reservations: Vec<NetworkReservation> =
|
||||||
|
state.into_values().map(|x| x.into()).collect::<Vec<_>>();
|
||||||
|
Ok(ListNetworkReservationsReply { reservations })
|
||||||
|
}
|
||||||
|
}
|
21
crates/daemon/src/control/list_zones.rs
Normal file
21
crates/daemon/src/control/list_zones.rs
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use krata::v1::common::Zone;
|
||||||
|
use krata::v1::control::{ListZonesReply, ListZonesRequest};
|
||||||
|
|
||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
|
||||||
|
pub struct ListZonesRpc {
|
||||||
|
zones: ZoneStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ListZonesRpc {
|
||||||
|
pub fn new(zones: ZoneStore) -> Self {
|
||||||
|
Self { zones }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, _request: ListZonesRequest) -> Result<ListZonesReply> {
|
||||||
|
let zones = self.zones.list().await?;
|
||||||
|
let zones = zones.into_values().collect::<Vec<Zone>>();
|
||||||
|
Ok(ListZonesReply { zones })
|
||||||
|
}
|
||||||
|
}
|
365
crates/daemon/src/control/mod.rs
Normal file
365
crates/daemon/src/control/mod.rs
Normal file
@ -0,0 +1,365 @@
|
|||||||
|
use std::pin::Pin;
|
||||||
|
|
||||||
|
use anyhow::Error;
|
||||||
|
use futures::Stream;
|
||||||
|
use list_network_reservations::ListNetworkReservationsRpc;
|
||||||
|
use tokio::sync::mpsc::Sender;
|
||||||
|
use tonic::{Request, Response, Status, Streaming};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use krata::v1::control::{
|
||||||
|
control_service_server::ControlService, CreateZoneReply, CreateZoneRequest, DestroyZoneReply,
|
||||||
|
DestroyZoneRequest, ExecInsideZoneReply, ExecInsideZoneRequest, GetHostCpuTopologyReply,
|
||||||
|
GetHostCpuTopologyRequest, GetHostStatusReply, GetHostStatusRequest, ListDevicesReply,
|
||||||
|
ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
|
||||||
|
ReadHypervisorConsoleReply, ReadHypervisorConsoleRequest, ReadZoneMetricsReply,
|
||||||
|
ReadZoneMetricsRequest, ResolveZoneIdReply, ResolveZoneIdRequest, SnoopIdmReply,
|
||||||
|
SnoopIdmRequest, UpdateZoneResourcesReply, UpdateZoneResourcesRequest, WatchEventsReply,
|
||||||
|
WatchEventsRequest, ZoneConsoleReply, ZoneConsoleRequest,
|
||||||
|
};
|
||||||
|
use krata::v1::control::{
|
||||||
|
GetZoneReply, GetZoneRequest, ListNetworkReservationsReply, ListNetworkReservationsRequest,
|
||||||
|
SetHostPowerManagementPolicyReply, SetHostPowerManagementPolicyRequest,
|
||||||
|
};
|
||||||
|
use krataoci::packer::service::OciPackerService;
|
||||||
|
use kratart::Runtime;
|
||||||
|
|
||||||
|
use crate::control::attach_zone_console::AttachZoneConsoleRpc;
|
||||||
|
use crate::control::create_zone::CreateZoneRpc;
|
||||||
|
use crate::control::destroy_zone::DestroyZoneRpc;
|
||||||
|
use crate::control::exec_inside_zone::ExecInsideZoneRpc;
|
||||||
|
use crate::control::get_host_cpu_topology::GetHostCpuTopologyRpc;
|
||||||
|
use crate::control::get_host_status::GetHostStatusRpc;
|
||||||
|
use crate::control::get_zone::GetZoneRpc;
|
||||||
|
use crate::control::list_devices::ListDevicesRpc;
|
||||||
|
use crate::control::list_zones::ListZonesRpc;
|
||||||
|
use crate::control::pull_image::PullImageRpc;
|
||||||
|
use crate::control::read_hypervisor_console::ReadHypervisorConsoleRpc;
|
||||||
|
use crate::control::read_zone_metrics::ReadZoneMetricsRpc;
|
||||||
|
use crate::control::resolve_zone_id::ResolveZoneIdRpc;
|
||||||
|
use crate::control::set_host_power_management_policy::SetHostPowerManagementPolicyRpc;
|
||||||
|
use crate::control::snoop_idm::SnoopIdmRpc;
|
||||||
|
use crate::control::update_zone_resources::UpdateZoneResourcesRpc;
|
||||||
|
use crate::control::watch_events::WatchEventsRpc;
|
||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
use crate::network::assignment::NetworkAssignment;
|
||||||
|
use crate::{
|
||||||
|
console::DaemonConsoleHandle, devices::DaemonDeviceManager, event::DaemonEventContext,
|
||||||
|
idm::DaemonIdmHandle, zlt::ZoneLookupTable,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub mod attach_zone_console;
|
||||||
|
pub mod create_zone;
|
||||||
|
pub mod destroy_zone;
|
||||||
|
pub mod exec_inside_zone;
|
||||||
|
pub mod get_host_cpu_topology;
|
||||||
|
pub mod get_host_status;
|
||||||
|
pub mod get_zone;
|
||||||
|
pub mod list_devices;
|
||||||
|
pub mod list_network_reservations;
|
||||||
|
pub mod list_zones;
|
||||||
|
pub mod pull_image;
|
||||||
|
pub mod read_hypervisor_console;
|
||||||
|
pub mod read_zone_metrics;
|
||||||
|
pub mod resolve_zone_id;
|
||||||
|
pub mod set_host_power_management_policy;
|
||||||
|
pub mod snoop_idm;
|
||||||
|
pub mod update_zone_resources;
|
||||||
|
pub mod watch_events;
|
||||||
|
|
||||||
|
/// Minimal string-carrying error used to surface `anyhow` errors out of
/// stream combinators (e.g. `try_stream!`) and across tonic boundaries.
pub struct ApiError {
    message: String,
}

impl From<Error> for ApiError {
    /// Capture only the rendered message of an `anyhow::Error`.
    fn from(value: Error) -> Self {
        ApiError {
            message: value.to_string(),
        }
    }
}

impl From<ApiError> for Status {
    /// Every API error maps to `Status::unknown` carrying the message.
    fn from(value: ApiError) -> Self {
        Status::unknown(value.message)
    }
}
|
||||||
|
|
||||||
|
/// Aggregates every daemon subsystem needed by the control-service RPC
/// handlers. Cheap to clone; each RPC method clones the pieces it needs into
/// a per-request handler struct.
#[derive(Clone)]
pub struct DaemonControlService {
    zlt: ZoneLookupTable,
    devices: DaemonDeviceManager,
    events: DaemonEventContext,
    console: DaemonConsoleHandle,
    idm: DaemonIdmHandle,
    zones: ZoneStore,
    network: NetworkAssignment,
    zone_reconciler_notify: Sender<Uuid>,
    packer: OciPackerService,
    runtime: Runtime,
}

impl DaemonControlService {
    /// Build the service from all daemon subsystems; pure field plumbing.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        zlt: ZoneLookupTable,
        devices: DaemonDeviceManager,
        events: DaemonEventContext,
        console: DaemonConsoleHandle,
        idm: DaemonIdmHandle,
        zones: ZoneStore,
        network: NetworkAssignment,
        zone_reconciler_notify: Sender<Uuid>,
        packer: OciPackerService,
        runtime: Runtime,
    ) -> Self {
        Self {
            zlt,
            devices,
            events,
            console,
            idm,
            zones,
            network,
            zone_reconciler_notify,
            packer,
            runtime,
        }
    }
}
|
||||||
|
|
||||||
|
/// gRPC surface of the daemon. Every method is a thin adapter: unwrap the
/// tonic request, delegate to the matching standalone `*Rpc` handler, and
/// convert the `anyhow::Result` into a tonic response via `adapt`.
#[tonic::async_trait]
impl ControlService for DaemonControlService {
    async fn get_host_status(
        &self,
        request: Request<GetHostStatusRequest>,
    ) -> Result<Response<GetHostStatusReply>, Status> {
        let request = request.into_inner();
        adapt(
            GetHostStatusRpc::new(self.network.clone(), self.zlt.clone())
                .process(request)
                .await,
        )
    }

    type SnoopIdmStream =
        Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;

    async fn snoop_idm(
        &self,
        request: Request<SnoopIdmRequest>,
    ) -> Result<Response<Self::SnoopIdmStream>, Status> {
        let request = request.into_inner();
        adapt(
            SnoopIdmRpc::new(self.idm.clone(), self.zlt.clone())
                .process(request)
                .await,
        )
    }

    async fn get_host_cpu_topology(
        &self,
        request: Request<GetHostCpuTopologyRequest>,
    ) -> Result<Response<GetHostCpuTopologyReply>, Status> {
        let request = request.into_inner();
        adapt(
            GetHostCpuTopologyRpc::new(self.runtime.clone())
                .process(request)
                .await,
        )
    }

    async fn set_host_power_management_policy(
        &self,
        request: Request<SetHostPowerManagementPolicyRequest>,
    ) -> Result<Response<SetHostPowerManagementPolicyReply>, Status> {
        let request = request.into_inner();
        adapt(
            SetHostPowerManagementPolicyRpc::new(self.runtime.clone())
                .process(request)
                .await,
        )
    }

    async fn list_devices(
        &self,
        request: Request<ListDevicesRequest>,
    ) -> Result<Response<ListDevicesReply>, Status> {
        let request = request.into_inner();
        adapt(
            ListDevicesRpc::new(self.devices.clone())
                .process(request)
                .await,
        )
    }

    async fn list_network_reservations(
        &self,
        request: Request<ListNetworkReservationsRequest>,
    ) -> Result<Response<ListNetworkReservationsReply>, Status> {
        let request = request.into_inner();
        adapt(
            ListNetworkReservationsRpc::new(self.network.clone())
                .process(request)
                .await,
        )
    }

    type PullImageStream =
        Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;

    async fn pull_image(
        &self,
        request: Request<PullImageRequest>,
    ) -> Result<Response<Self::PullImageStream>, Status> {
        let request = request.into_inner();
        adapt(
            PullImageRpc::new(self.packer.clone())
                .process(request)
                .await,
        )
    }

    async fn create_zone(
        &self,
        request: Request<CreateZoneRequest>,
    ) -> Result<Response<CreateZoneReply>, Status> {
        let request = request.into_inner();
        adapt(
            CreateZoneRpc::new(
                self.zones.clone(),
                self.zlt.clone(),
                self.zone_reconciler_notify.clone(),
            )
            .process(request)
            .await,
        )
    }

    async fn destroy_zone(
        &self,
        request: Request<DestroyZoneRequest>,
    ) -> Result<Response<DestroyZoneReply>, Status> {
        let request = request.into_inner();
        adapt(
            DestroyZoneRpc::new(self.zones.clone(), self.zone_reconciler_notify.clone())
                .process(request)
                .await,
        )
    }

    async fn resolve_zone_id(
        &self,
        request: Request<ResolveZoneIdRequest>,
    ) -> Result<Response<ResolveZoneIdReply>, Status> {
        let request = request.into_inner();
        adapt(
            ResolveZoneIdRpc::new(self.zones.clone())
                .process(request)
                .await,
        )
    }

    async fn get_zone(
        &self,
        request: Request<GetZoneRequest>,
    ) -> Result<Response<GetZoneReply>, Status> {
        let request = request.into_inner();
        adapt(GetZoneRpc::new(self.zones.clone()).process(request).await)
    }

    async fn update_zone_resources(
        &self,
        request: Request<UpdateZoneResourcesRequest>,
    ) -> Result<Response<UpdateZoneResourcesReply>, Status> {
        let request = request.into_inner();
        adapt(
            UpdateZoneResourcesRpc::new(self.runtime.clone(), self.zones.clone())
                .process(request)
                .await,
        )
    }

    async fn list_zones(
        &self,
        request: Request<ListZonesRequest>,
    ) -> Result<Response<ListZonesReply>, Status> {
        let request = request.into_inner();
        adapt(ListZonesRpc::new(self.zones.clone()).process(request).await)
    }

    type AttachZoneConsoleStream =
        Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;

    // Streaming endpoint: the full client stream is handed to the handler.
    async fn attach_zone_console(
        &self,
        request: Request<Streaming<ZoneConsoleRequest>>,
    ) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
        let input = request.into_inner();
        adapt(
            AttachZoneConsoleRpc::new(self.console.clone())
                .process(input)
                .await,
        )
    }

    type ExecInsideZoneStream =
        Pin<Box<dyn Stream<Item = Result<ExecInsideZoneReply, Status>> + Send + 'static>>;

    // Streaming endpoint: the full client stream is handed to the handler.
    async fn exec_inside_zone(
        &self,
        request: Request<Streaming<ExecInsideZoneRequest>>,
    ) -> Result<Response<Self::ExecInsideZoneStream>, Status> {
        let input = request.into_inner();
        adapt(
            ExecInsideZoneRpc::new(self.idm.clone())
                .process(input)
                .await,
        )
    }

    async fn read_zone_metrics(
        &self,
        request: Request<ReadZoneMetricsRequest>,
    ) -> Result<Response<ReadZoneMetricsReply>, Status> {
        let request = request.into_inner();
        adapt(
            ReadZoneMetricsRpc::new(self.idm.clone())
                .process(request)
                .await,
        )
    }

    type WatchEventsStream =
        Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>;

    async fn watch_events(
        &self,
        request: Request<WatchEventsRequest>,
    ) -> Result<Response<Self::WatchEventsStream>, Status> {
        let request = request.into_inner();
        adapt(
            WatchEventsRpc::new(self.events.clone())
                .process(request)
                .await,
        )
    }

    async fn read_hypervisor_console(
        &self,
        request: Request<ReadHypervisorConsoleRequest>,
    ) -> Result<Response<ReadHypervisorConsoleReply>, Status> {
        let request = request.into_inner();
        adapt(
            ReadHypervisorConsoleRpc::new(self.runtime.clone())
                .process(request)
                .await,
        )
    }
}
|
||||||
|
|
||||||
|
fn adapt<T>(result: anyhow::Result<T>) -> Result<Response<T>, Status> {
|
||||||
|
result
|
||||||
|
.map(Response::new)
|
||||||
|
.map_err(|error| Status::unknown(error.to_string()))
|
||||||
|
}
|
100
crates/daemon/src/control/pull_image.rs
Normal file
100
crates/daemon/src/control/pull_image.rs
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
use crate::control::ApiError;
|
||||||
|
use crate::oci::convert_oci_progress;
|
||||||
|
use anyhow::Result;
|
||||||
|
use async_stream::try_stream;
|
||||||
|
use krata::v1::common::OciImageFormat;
|
||||||
|
use krata::v1::control::{PullImageReply, PullImageRequest};
|
||||||
|
use krataoci::name::ImageName;
|
||||||
|
use krataoci::packer::service::OciPackerService;
|
||||||
|
use krataoci::packer::{OciPackedFormat, OciPackedImage};
|
||||||
|
use krataoci::progress::{OciProgress, OciProgressContext};
|
||||||
|
use std::pin::Pin;
|
||||||
|
use tokio::select;
|
||||||
|
use tokio::task::JoinError;
|
||||||
|
use tokio_stream::Stream;
|
||||||
|
use tonic::Status;
|
||||||
|
|
||||||
|
/// Outcome of one iteration of the pull loop's `select!`.
enum PullImageSelect {
    /// A progress update from the packer; `None` when the progress watch
    /// channel has closed.
    Progress(Option<OciProgress>),
    /// The background pull task finished; the outer `Result` is the join
    /// outcome, the inner one the pull outcome.
    Completed(Result<Result<OciPackedImage, anyhow::Error>, JoinError>),
}

/// RPC handler that pulls an OCI image through the packer service, streaming
/// progress replies followed by a final reply carrying digest and format.
pub struct PullImageRpc {
    packer: OciPackerService,
}

impl PullImageRpc {
    /// Construct a handler backed by the OCI packer service.
    pub fn new(packer: OciPackerService) -> Self {
        Self { packer }
    }

    /// Pull the requested image in the requested format.
    ///
    /// The pull runs in a spawned task; progress updates arriving on the
    /// watch channel are forwarded as intermediate replies, and task
    /// completion yields the final reply and ends the stream. An unknown
    /// format falls back to squashfs.
    pub async fn process(
        self,
        request: PullImageRequest,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>> {
        let name = ImageName::parse(&request.image)?;
        let format = match request.format() {
            OciImageFormat::Unknown => OciPackedFormat::Squashfs,
            OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
            OciImageFormat::Erofs => OciPackedFormat::Erofs,
            OciImageFormat::Tar => OciPackedFormat::Tar,
        };
        let (context, mut receiver) = OciProgressContext::create();
        let our_packer = self.packer;

        let output = try_stream! {
            let mut task = tokio::task::spawn(async move {
                our_packer.request(name, format, request.overwrite_cache, request.update, context).await
            });
            let abort_handle = task.abort_handle();
            // Abort the pull task if the output stream is dropped early
            // (e.g. the client disconnects).
            let _task_cancel_guard = scopeguard::guard(abort_handle, |handle| {
                handle.abort();
            });

            loop {
                let what = select! {
                    x = receiver.changed() => match x {
                        Ok(_) => PullImageSelect::Progress(Some(receiver.borrow_and_update().clone())),
                        Err(_) => PullImageSelect::Progress(None),
                    },
                    x = &mut task => PullImageSelect::Completed(x),
                };
                match what {
                    PullImageSelect::Progress(Some(progress)) => {
                        // Intermediate reply: progress only, no digest yet.
                        let reply = PullImageReply {
                            progress: Some(convert_oci_progress(progress)),
                            digest: String::new(),
                            format: OciImageFormat::Unknown.into(),
                        };
                        yield reply;
                    },

                    PullImageSelect::Completed(result) => {
                        // Surface both join errors and pull errors.
                        let result = result.map_err(|err| ApiError {
                            message: err.to_string(),
                        })?;
                        let packed = result.map_err(|err| ApiError {
                            message: err.to_string(),
                        })?;
                        // Final reply: digest and the actual packed format.
                        let reply = PullImageReply {
                            progress: None,
                            digest: packed.digest,
                            format: match packed.format {
                                OciPackedFormat::Squashfs => OciImageFormat::Squashfs.into(),
                                OciPackedFormat::Erofs => OciImageFormat::Erofs.into(),
                                OciPackedFormat::Tar => OciImageFormat::Tar.into(),
                            },
                        };
                        yield reply;
                        break;
                    },

                    // Progress channel closed but the task is still running:
                    // keep waiting for completion.
                    _ => {
                        continue;
                    }
                }
            }
        };
        Ok(Box::pin(output))
    }
}
|
23
crates/daemon/src/control/read_hypervisor_console.rs
Normal file
23
crates/daemon/src/control/read_hypervisor_console.rs
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use krata::v1::control::{ReadHypervisorConsoleReply, ReadHypervisorConsoleRequest};
|
||||||
|
use kratart::Runtime;
|
||||||
|
|
||||||
|
pub struct ReadHypervisorConsoleRpc {
|
||||||
|
runtime: Runtime,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadHypervisorConsoleRpc {
|
||||||
|
pub fn new(runtime: Runtime) -> Self {
|
||||||
|
Self { runtime }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
_: ReadHypervisorConsoleRequest,
|
||||||
|
) -> Result<ReadHypervisorConsoleReply> {
|
||||||
|
let data = self.runtime.read_hypervisor_console(false).await?;
|
||||||
|
Ok(ReadHypervisorConsoleReply {
|
||||||
|
data: data.to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
40
crates/daemon/src/control/read_zone_metrics.rs
Normal file
40
crates/daemon/src/control/read_zone_metrics.rs
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::Result;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use krata::idm::internal::MetricsRequest;
|
||||||
|
use krata::idm::internal::{
|
||||||
|
request::Request as IdmRequestType, response::Response as IdmResponseType,
|
||||||
|
Request as IdmRequest,
|
||||||
|
};
|
||||||
|
use krata::v1::control::{ReadZoneMetricsReply, ReadZoneMetricsRequest};
|
||||||
|
|
||||||
|
use crate::idm::DaemonIdmHandle;
|
||||||
|
use crate::metrics::idm_metric_to_api;
|
||||||
|
|
||||||
|
pub struct ReadZoneMetricsRpc {
|
||||||
|
idm: DaemonIdmHandle,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ReadZoneMetricsRpc {
|
||||||
|
pub fn new(idm: DaemonIdmHandle) -> Self {
|
||||||
|
Self { idm }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, request: ReadZoneMetricsRequest) -> Result<ReadZoneMetricsReply> {
|
||||||
|
let uuid = Uuid::from_str(&request.zone_id)?;
|
||||||
|
let client = self.idm.client(uuid).await?;
|
||||||
|
let response = client
|
||||||
|
.send(IdmRequest {
|
||||||
|
request: Some(IdmRequestType::Metrics(MetricsRequest {})),
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut reply = ReadZoneMetricsReply::default();
|
||||||
|
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
|
||||||
|
reply.root = metrics.root.map(idm_metric_to_api);
|
||||||
|
}
|
||||||
|
Ok(reply)
|
||||||
|
}
|
||||||
|
}
|
30
crates/daemon/src/control/resolve_zone_id.rs
Normal file
30
crates/daemon/src/control/resolve_zone_id.rs
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use krata::v1::common::Zone;
|
||||||
|
use krata::v1::control::{ResolveZoneIdReply, ResolveZoneIdRequest};
|
||||||
|
|
||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
|
||||||
|
pub struct ResolveZoneIdRpc {
|
||||||
|
zones: ZoneStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ResolveZoneIdRpc {
|
||||||
|
pub fn new(zones: ZoneStore) -> Self {
|
||||||
|
Self { zones }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(self, request: ResolveZoneIdRequest) -> Result<ResolveZoneIdReply> {
|
||||||
|
let zones = self.zones.list().await?;
|
||||||
|
let zones = zones
|
||||||
|
.into_values()
|
||||||
|
.filter(|x| {
|
||||||
|
let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
|
||||||
|
(!request.name.is_empty() && comparison_spec.name == request.name)
|
||||||
|
|| x.id == request.name
|
||||||
|
})
|
||||||
|
.collect::<Vec<Zone>>();
|
||||||
|
Ok(ResolveZoneIdReply {
|
||||||
|
zone_id: zones.first().cloned().map(|x| x.id).unwrap_or_default(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,25 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use krata::v1::control::{SetHostPowerManagementPolicyReply, SetHostPowerManagementPolicyRequest};
|
||||||
|
use kratart::Runtime;
|
||||||
|
|
||||||
|
pub struct SetHostPowerManagementPolicyRpc {
|
||||||
|
runtime: Runtime,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SetHostPowerManagementPolicyRpc {
|
||||||
|
pub fn new(runtime: Runtime) -> Self {
|
||||||
|
Self { runtime }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
request: SetHostPowerManagementPolicyRequest,
|
||||||
|
) -> Result<SetHostPowerManagementPolicyReply> {
|
||||||
|
let power = self.runtime.power_management_context().await?;
|
||||||
|
let scheduler = &request.scheduler;
|
||||||
|
|
||||||
|
power.set_smt_policy(request.smt_awareness).await?;
|
||||||
|
power.set_scheduler_policy(scheduler).await?;
|
||||||
|
Ok(SetHostPowerManagementPolicyReply {})
|
||||||
|
}
|
||||||
|
}
|
39
crates/daemon/src/control/snoop_idm.rs
Normal file
39
crates/daemon/src/control/snoop_idm.rs
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
use crate::idm::DaemonIdmHandle;
|
||||||
|
use crate::zlt::ZoneLookupTable;
|
||||||
|
use anyhow::Result;
|
||||||
|
use async_stream::try_stream;
|
||||||
|
use krata::v1::control::{SnoopIdmReply, SnoopIdmRequest};
|
||||||
|
use std::pin::Pin;
|
||||||
|
use tokio_stream::Stream;
|
||||||
|
use tonic::Status;
|
||||||
|
|
||||||
|
pub struct SnoopIdmRpc {
|
||||||
|
idm: DaemonIdmHandle,
|
||||||
|
zlt: ZoneLookupTable,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SnoopIdmRpc {
|
||||||
|
pub fn new(idm: DaemonIdmHandle, zlt: ZoneLookupTable) -> Self {
|
||||||
|
Self { idm, zlt }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
_request: SnoopIdmRequest,
|
||||||
|
) -> Result<Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>> {
|
||||||
|
let mut messages = self.idm.snoop();
|
||||||
|
let zlt = self.zlt.clone();
|
||||||
|
let output = try_stream! {
|
||||||
|
while let Ok(event) = messages.recv().await {
|
||||||
|
let Some(from_uuid) = zlt.lookup_uuid_by_domid(event.from).await else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let Some(to_uuid) = zlt.lookup_uuid_by_domid(event.to).await else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
yield SnoopIdmReply { from: from_uuid.to_string(), to: to_uuid.to_string(), packet: Some(event.packet) };
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(Box::pin(output))
|
||||||
|
}
|
||||||
|
}
|
82
crates/daemon/src/control/update_zone_resources.rs
Normal file
82
crates/daemon/src/control/update_zone_resources.rs
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use krata::v1::common::{ZoneResourceStatus, ZoneState};
|
||||||
|
use krata::v1::control::{UpdateZoneResourcesReply, UpdateZoneResourcesRequest};
|
||||||
|
use kratart::Runtime;
|
||||||
|
|
||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
|
||||||
|
pub struct UpdateZoneResourcesRpc {
|
||||||
|
runtime: Runtime,
|
||||||
|
zones: ZoneStore,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UpdateZoneResourcesRpc {
|
||||||
|
pub fn new(runtime: Runtime, zones: ZoneStore) -> Self {
|
||||||
|
Self { runtime, zones }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
request: UpdateZoneResourcesRequest,
|
||||||
|
) -> Result<UpdateZoneResourcesReply> {
|
||||||
|
let uuid = Uuid::from_str(&request.zone_id)?;
|
||||||
|
let Some(mut zone) = self.zones.read(uuid).await? else {
|
||||||
|
return Err(anyhow!("zone not found"));
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some(ref mut status) = zone.status else {
|
||||||
|
return Err(anyhow!("zone state not available"));
|
||||||
|
};
|
||||||
|
|
||||||
|
if status.state() != ZoneState::Created {
|
||||||
|
return Err(anyhow!("zone is in an invalid state"));
|
||||||
|
}
|
||||||
|
|
||||||
|
if status.domid == 0 || status.domid == u32::MAX {
|
||||||
|
return Err(anyhow!("zone domid is invalid"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut resources = request.resources.unwrap_or_default();
|
||||||
|
if resources.target_memory > resources.max_memory {
|
||||||
|
resources.max_memory = resources.target_memory;
|
||||||
|
}
|
||||||
|
|
||||||
|
if resources.target_cpus < 1 {
|
||||||
|
resources.target_cpus = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
let initial_resources = zone
|
||||||
|
.spec
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_default()
|
||||||
|
.initial_resources
|
||||||
|
.unwrap_or_default();
|
||||||
|
if resources.target_cpus > initial_resources.max_cpus {
|
||||||
|
resources.target_cpus = initial_resources.max_cpus;
|
||||||
|
}
|
||||||
|
resources.max_cpus = initial_resources.max_cpus;
|
||||||
|
|
||||||
|
self.runtime
|
||||||
|
.set_memory_resources(
|
||||||
|
status.domid,
|
||||||
|
resources.target_memory * 1024 * 1024,
|
||||||
|
resources.max_memory * 1024 * 1024,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|error| anyhow!("failed to set memory resources: {}", error))?;
|
||||||
|
self.runtime
|
||||||
|
.set_cpu_resources(status.domid, resources.target_cpus)
|
||||||
|
.await
|
||||||
|
.map_err(|error| anyhow!("failed to set cpu resources: {}", error))?;
|
||||||
|
status.resource_status = Some(ZoneResourceStatus {
|
||||||
|
active_resources: Some(resources),
|
||||||
|
});
|
||||||
|
|
||||||
|
self.zones.update(uuid, zone).await?;
|
||||||
|
Ok(UpdateZoneResourcesReply {})
|
||||||
|
}
|
||||||
|
}
|
31
crates/daemon/src/control/watch_events.rs
Normal file
31
crates/daemon/src/control/watch_events.rs
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
use crate::event::DaemonEventContext;
|
||||||
|
use anyhow::Result;
|
||||||
|
use async_stream::try_stream;
|
||||||
|
use krata::v1::control::{WatchEventsReply, WatchEventsRequest};
|
||||||
|
use std::pin::Pin;
|
||||||
|
use tokio_stream::Stream;
|
||||||
|
use tonic::Status;
|
||||||
|
|
||||||
|
pub struct WatchEventsRpc {
|
||||||
|
events: DaemonEventContext,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl WatchEventsRpc {
|
||||||
|
pub fn new(events: DaemonEventContext) -> Self {
|
||||||
|
Self { events }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process(
|
||||||
|
self,
|
||||||
|
_request: WatchEventsRequest,
|
||||||
|
) -> Result<Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>>
|
||||||
|
{
|
||||||
|
let mut events = self.events.subscribe();
|
||||||
|
let output = try_stream! {
|
||||||
|
while let Ok(event) = events.recv().await {
|
||||||
|
yield WatchEventsReply { event: Some(event), };
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(Box::pin(output))
|
||||||
|
}
|
||||||
|
}
|
21
crates/daemon/src/db/mod.rs
Normal file
21
crates/daemon/src/db/mod.rs
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use redb::Database;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
pub mod network;
|
||||||
|
pub mod zone;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct KrataDatabase {
|
||||||
|
pub database: Arc<Database>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KrataDatabase {
|
||||||
|
pub fn open(path: &Path) -> Result<Self> {
|
||||||
|
let database = Database::create(path)?;
|
||||||
|
Ok(KrataDatabase {
|
||||||
|
database: Arc::new(database),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
134
crates/daemon/src/db/network.rs
Normal file
134
crates/daemon/src/db/network.rs
Normal file
@ -0,0 +1,134 @@
|
|||||||
|
use crate::db::KrataDatabase;
|
||||||
|
use advmac::MacAddr6;
|
||||||
|
use anyhow::Result;
|
||||||
|
use krata::v1::common::NetworkReservation as ApiNetworkReservation;
|
||||||
|
use log::error;
|
||||||
|
use redb::{ReadableTable, TableDefinition};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
const NETWORK_RESERVATION_TABLE: TableDefinition<u128, &[u8]> =
|
||||||
|
TableDefinition::new("network-reservation");
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct NetworkReservationStore {
|
||||||
|
db: KrataDatabase,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkReservationStore {
|
||||||
|
pub fn open(db: KrataDatabase) -> Result<Self> {
|
||||||
|
let write = db.database.begin_write()?;
|
||||||
|
let _ = write.open_table(NETWORK_RESERVATION_TABLE);
|
||||||
|
write.commit()?;
|
||||||
|
Ok(NetworkReservationStore { db })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn read(&self, id: Uuid) -> Result<Option<NetworkReservation>> {
|
||||||
|
let read = self.db.database.begin_read()?;
|
||||||
|
let table = read.open_table(NETWORK_RESERVATION_TABLE)?;
|
||||||
|
let Some(entry) = table.get(id.to_u128_le())? else {
|
||||||
|
return Ok(None);
|
||||||
|
};
|
||||||
|
let bytes = entry.value();
|
||||||
|
Ok(Some(serde_json::from_slice(bytes)?))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn list(&self) -> Result<HashMap<Uuid, NetworkReservation>> {
|
||||||
|
enum ListEntry {
|
||||||
|
Valid(Uuid, NetworkReservation),
|
||||||
|
Invalid(Uuid),
|
||||||
|
}
|
||||||
|
let mut reservations: HashMap<Uuid, NetworkReservation> = HashMap::new();
|
||||||
|
|
||||||
|
let corruptions = {
|
||||||
|
let read = self.db.database.begin_read()?;
|
||||||
|
let table = read.open_table(NETWORK_RESERVATION_TABLE)?;
|
||||||
|
table
|
||||||
|
.iter()?
|
||||||
|
.flat_map(|result| {
|
||||||
|
result.map(|(key, value)| {
|
||||||
|
let uuid = Uuid::from_u128_le(key.value());
|
||||||
|
match serde_json::from_slice::<NetworkReservation>(value.value()) {
|
||||||
|
Ok(reservation) => ListEntry::Valid(uuid, reservation),
|
||||||
|
Err(error) => {
|
||||||
|
error!(
|
||||||
|
"found invalid network reservation in database for uuid {}: {}",
|
||||||
|
uuid, error
|
||||||
|
);
|
||||||
|
ListEntry::Invalid(uuid)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.filter_map(|entry| match entry {
|
||||||
|
ListEntry::Valid(uuid, reservation) => {
|
||||||
|
reservations.insert(uuid, reservation);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
ListEntry::Invalid(uuid) => Some(uuid),
|
||||||
|
})
|
||||||
|
.collect::<Vec<Uuid>>()
|
||||||
|
};
|
||||||
|
|
||||||
|
if !corruptions.is_empty() {
|
||||||
|
let write = self.db.database.begin_write()?;
|
||||||
|
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
|
||||||
|
for corruption in corruptions {
|
||||||
|
table.remove(corruption.to_u128_le())?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(reservations)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn update(&self, id: Uuid, entry: NetworkReservation) -> Result<()> {
|
||||||
|
let write = self.db.database.begin_write()?;
|
||||||
|
{
|
||||||
|
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
|
||||||
|
let bytes = serde_json::to_vec(&entry)?;
|
||||||
|
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
||||||
|
}
|
||||||
|
write.commit()?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
||||||
|
let write = self.db.database.begin_write()?;
|
||||||
|
{
|
||||||
|
let mut table = write.open_table(NETWORK_RESERVATION_TABLE)?;
|
||||||
|
table.remove(id.to_u128_le())?;
|
||||||
|
}
|
||||||
|
write.commit()?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||||
|
pub struct NetworkReservation {
|
||||||
|
pub uuid: String,
|
||||||
|
pub ipv4: Ipv4Addr,
|
||||||
|
pub ipv6: Ipv6Addr,
|
||||||
|
pub mac: MacAddr6,
|
||||||
|
pub ipv4_prefix: u8,
|
||||||
|
pub ipv6_prefix: u8,
|
||||||
|
pub gateway_ipv4: Ipv4Addr,
|
||||||
|
pub gateway_ipv6: Ipv6Addr,
|
||||||
|
pub gateway_mac: MacAddr6,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<NetworkReservation> for ApiNetworkReservation {
|
||||||
|
fn from(val: NetworkReservation) -> Self {
|
||||||
|
ApiNetworkReservation {
|
||||||
|
uuid: val.uuid,
|
||||||
|
ipv4: format!("{}/{}", val.ipv4, val.ipv4_prefix),
|
||||||
|
ipv6: format!("{}/{}", val.ipv6, val.ipv6_prefix),
|
||||||
|
mac: val.mac.to_string().to_lowercase().replace('-', ":"),
|
||||||
|
gateway_ipv4: format!("{}/{}", val.gateway_ipv4, val.ipv4_prefix),
|
||||||
|
gateway_ipv6: format!("{}/{}", val.gateway_ipv6, val.ipv6_prefix),
|
||||||
|
gateway_mac: val.gateway_mac.to_string().to_lowercase().replace('-', ":"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -1,33 +1,31 @@
|
|||||||
use std::{collections::HashMap, path::Path, sync::Arc};
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use crate::db::KrataDatabase;
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use krata::v1::common::Zone;
|
use krata::v1::common::Zone;
|
||||||
use log::error;
|
use log::error;
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use redb::{Database, ReadableTable, TableDefinition};
|
use redb::{ReadableTable, TableDefinition};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
const ZONES: TableDefinition<u128, &[u8]> = TableDefinition::new("zones");
|
const ZONE_TABLE: TableDefinition<u128, &[u8]> = TableDefinition::new("zone");
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct ZoneStore {
|
pub struct ZoneStore {
|
||||||
database: Arc<Database>,
|
db: KrataDatabase,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ZoneStore {
|
impl ZoneStore {
|
||||||
pub fn open(path: &Path) -> Result<Self> {
|
pub fn open(db: KrataDatabase) -> Result<Self> {
|
||||||
let database = Database::create(path)?;
|
let write = db.database.begin_write()?;
|
||||||
let write = database.begin_write()?;
|
let _ = write.open_table(ZONE_TABLE);
|
||||||
let _ = write.open_table(ZONES);
|
|
||||||
write.commit()?;
|
write.commit()?;
|
||||||
Ok(ZoneStore {
|
Ok(ZoneStore { db })
|
||||||
database: Arc::new(database),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
|
pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
|
||||||
let read = self.database.begin_read()?;
|
let read = self.db.database.begin_read()?;
|
||||||
let table = read.open_table(ZONES)?;
|
let table = read.open_table(ZONE_TABLE)?;
|
||||||
let Some(entry) = table.get(id.to_u128_le())? else {
|
let Some(entry) = table.get(id.to_u128_le())? else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
};
|
};
|
||||||
@ -37,8 +35,8 @@ impl ZoneStore {
|
|||||||
|
|
||||||
pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
|
pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
|
||||||
let mut zones: HashMap<Uuid, Zone> = HashMap::new();
|
let mut zones: HashMap<Uuid, Zone> = HashMap::new();
|
||||||
let read = self.database.begin_read()?;
|
let read = self.db.database.begin_read()?;
|
||||||
let table = read.open_table(ZONES)?;
|
let table = read.open_table(ZONE_TABLE)?;
|
||||||
for result in table.iter()? {
|
for result in table.iter()? {
|
||||||
let (key, value) = result?;
|
let (key, value) = result?;
|
||||||
let uuid = Uuid::from_u128_le(key.value());
|
let uuid = Uuid::from_u128_le(key.value());
|
||||||
@ -58,9 +56,9 @@ impl ZoneStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
|
pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
|
||||||
let write = self.database.begin_write()?;
|
let write = self.db.database.begin_write()?;
|
||||||
{
|
{
|
||||||
let mut table = write.open_table(ZONES)?;
|
let mut table = write.open_table(ZONE_TABLE)?;
|
||||||
let bytes = entry.encode_to_vec();
|
let bytes = entry.encode_to_vec();
|
||||||
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
||||||
}
|
}
|
||||||
@ -69,9 +67,9 @@ impl ZoneStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
||||||
let write = self.database.begin_write()?;
|
let write = self.db.database.begin_write()?;
|
||||||
{
|
{
|
||||||
let mut table = write.open_table(ZONES)?;
|
let mut table = write.open_table(ZONE_TABLE)?;
|
||||||
table.remove(id.to_u128_le())?;
|
table.remove(id.to_u128_le())?;
|
||||||
}
|
}
|
||||||
write.commit()?;
|
write.commit()?;
|
@ -4,9 +4,10 @@ use std::{
|
|||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{db::ZoneStore, idm::DaemonIdmHandle};
|
use crate::db::zone::ZoneStore;
|
||||||
|
use crate::idm::DaemonIdmHandle;
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use krata::v1::common::ZoneExitInfo;
|
use krata::v1::common::ZoneExitStatus;
|
||||||
use krata::{
|
use krata::{
|
||||||
idm::{internal::event::Event as EventType, internal::Event},
|
idm::{internal::event::Event as EventType, internal::Event},
|
||||||
v1::common::{ZoneState, ZoneStatus},
|
v1::common::{ZoneState, ZoneStatus},
|
||||||
@ -83,15 +84,15 @@ impl DaemonEventGenerator {
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(ref state) = zone.state else {
|
let Some(ref status) = zone.status else {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
let status = state.status();
|
let state = status.state();
|
||||||
let id = Uuid::from_str(&zone.id)?;
|
let id = Uuid::from_str(&zone.id)?;
|
||||||
let domid = state.domid;
|
let domid = status.domid;
|
||||||
match status {
|
match state {
|
||||||
ZoneStatus::Started => {
|
ZoneState::Created => {
|
||||||
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
||||||
let client = self.idm.client_by_domid(domid).await?;
|
let client = self.idm.client_by_domid(domid).await?;
|
||||||
let mut receiver = client.subscribe().await?;
|
let mut receiver = client.subscribe().await?;
|
||||||
@ -111,7 +112,7 @@ impl DaemonEventGenerator {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ZoneStatus::Destroyed => {
|
ZoneState::Destroyed => {
|
||||||
if let Some((_, handle)) = self.idms.remove(&domid) {
|
if let Some((_, handle)) = self.idms.remove(&domid) {
|
||||||
handle.abort();
|
handle.abort();
|
||||||
}
|
}
|
||||||
@ -131,13 +132,14 @@ impl DaemonEventGenerator {
|
|||||||
|
|
||||||
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
||||||
if let Some(mut zone) = self.zones.read(id).await? {
|
if let Some(mut zone) = self.zones.read(id).await? {
|
||||||
zone.state = Some(ZoneState {
|
zone.status = Some(ZoneStatus {
|
||||||
status: ZoneStatus::Exited.into(),
|
state: ZoneState::Exited.into(),
|
||||||
network: zone.state.clone().unwrap_or_default().network,
|
network_status: zone.status.clone().unwrap_or_default().network_status,
|
||||||
exit_info: Some(ZoneExitInfo { code }),
|
exit_status: Some(ZoneExitStatus { code }),
|
||||||
error_info: None,
|
error_status: None,
|
||||||
host: zone.state.clone().map(|x| x.host).unwrap_or_default(),
|
resource_status: zone.status.clone().unwrap_or_default().resource_status,
|
||||||
domid: zone.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
host: zone.status.clone().map(|x| x.host).unwrap_or_default(),
|
||||||
|
domid: zone.status.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
||||||
});
|
});
|
||||||
|
|
||||||
self.zones.update(id, zone).await?;
|
self.zones.update(id, zone).await?;
|
||||||
|
@ -11,7 +11,7 @@ use krata::idm::{
|
|||||||
transport::IdmTransportPacket,
|
transport::IdmTransportPacket,
|
||||||
};
|
};
|
||||||
use kratart::channel::ChannelService;
|
use kratart::channel::ChannelService;
|
||||||
use log::{error, warn};
|
use log::{debug, error, warn};
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use tokio::{
|
use tokio::{
|
||||||
select,
|
select,
|
||||||
@ -31,7 +31,7 @@ type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DaemonIdmHandle {
|
pub struct DaemonIdmHandle {
|
||||||
glt: ZoneLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
clients: ClientMap,
|
clients: ClientMap,
|
||||||
feeds: BackendFeedMap,
|
feeds: BackendFeedMap,
|
||||||
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
||||||
@ -45,7 +45,7 @@ impl DaemonIdmHandle {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn client(&self, uuid: Uuid) -> Result<IdmInternalClient> {
|
pub async fn client(&self, uuid: Uuid) -> Result<IdmInternalClient> {
|
||||||
let Some(domid) = self.glt.lookup_domid_by_uuid(&uuid).await else {
|
let Some(domid) = self.zlt.lookup_domid_by_uuid(&uuid).await else {
|
||||||
return Err(anyhow!("unable to find domain {}", uuid));
|
return Err(anyhow!("unable to find domain {}", uuid));
|
||||||
};
|
};
|
||||||
self.client_by_domid(domid).await
|
self.client_by_domid(domid).await
|
||||||
@ -72,7 +72,7 @@ pub struct DaemonIdmSnoopPacket {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub struct DaemonIdm {
|
pub struct DaemonIdm {
|
||||||
glt: ZoneLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
clients: ClientMap,
|
clients: ClientMap,
|
||||||
feeds: BackendFeedMap,
|
feeds: BackendFeedMap,
|
||||||
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
||||||
@ -84,16 +84,21 @@ pub struct DaemonIdm {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DaemonIdm {
|
impl DaemonIdm {
|
||||||
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonIdm> {
|
pub async fn new(zlt: ZoneLookupTable) -> Result<DaemonIdm> {
|
||||||
|
debug!("allocating channel service for idm");
|
||||||
let (service, tx_raw_sender, rx_receiver) =
|
let (service, tx_raw_sender, rx_receiver) =
|
||||||
ChannelService::new("krata-channel".to_string(), None).await?;
|
ChannelService::new("krata-channel".to_string(), None).await?;
|
||||||
let (tx_sender, tx_receiver) = channel(100);
|
let (tx_sender, tx_receiver) = channel(100);
|
||||||
let (snoop_sender, _) = broadcast::channel(100);
|
let (snoop_sender, _) = broadcast::channel(100);
|
||||||
|
|
||||||
|
debug!("starting idm channel service");
|
||||||
let task = service.launch().await?;
|
let task = service.launch().await?;
|
||||||
|
|
||||||
let clients = Arc::new(Mutex::new(HashMap::new()));
|
let clients = Arc::new(Mutex::new(HashMap::new()));
|
||||||
let feeds = Arc::new(Mutex::new(HashMap::new()));
|
let feeds = Arc::new(Mutex::new(HashMap::new()));
|
||||||
|
|
||||||
Ok(DaemonIdm {
|
Ok(DaemonIdm {
|
||||||
glt,
|
zlt,
|
||||||
rx_receiver,
|
rx_receiver,
|
||||||
tx_receiver,
|
tx_receiver,
|
||||||
tx_sender,
|
tx_sender,
|
||||||
@ -106,7 +111,7 @@ impl DaemonIdm {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
|
pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
|
||||||
let glt = self.glt.clone();
|
let zlt = self.zlt.clone();
|
||||||
let clients = self.clients.clone();
|
let clients = self.clients.clone();
|
||||||
let feeds = self.feeds.clone();
|
let feeds = self.feeds.clone();
|
||||||
let tx_sender = self.tx_sender.clone();
|
let tx_sender = self.tx_sender.clone();
|
||||||
@ -119,7 +124,7 @@ impl DaemonIdm {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
Ok(DaemonIdmHandle {
|
Ok(DaemonIdmHandle {
|
||||||
glt,
|
zlt,
|
||||||
clients,
|
clients,
|
||||||
feeds,
|
feeds,
|
||||||
tx_sender,
|
tx_sender,
|
||||||
@ -128,52 +133,99 @@ impl DaemonIdm {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn process_rx_packet(
|
||||||
|
&mut self,
|
||||||
|
domid: u32,
|
||||||
|
data: Option<Vec<u8>>,
|
||||||
|
buffers: &mut HashMap<u32, BytesMut>,
|
||||||
|
) -> Result<()> {
|
||||||
|
// check if data is present, if it is not, that signals a closed channel.
|
||||||
|
if let Some(data) = data {
|
||||||
|
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
|
||||||
|
buffer.extend_from_slice(&data);
|
||||||
|
loop {
|
||||||
|
// check if the buffer is less than the header size, if so, wait for more data
|
||||||
|
if buffer.len() < 6 {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// check for the magic bytes 0xff, 0xff at the start of the message, if that doesn't
|
||||||
|
// exist, clear the buffer. this ensures that partial messages won't be processed.
|
||||||
|
if buffer[0] != 0xff || buffer[1] != 0xff {
|
||||||
|
buffer.clear();
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// read the size from the buffer as a little endian u32
|
||||||
|
let size = (buffer[2] as u32
|
||||||
|
| (buffer[3] as u32) << 8
|
||||||
|
| (buffer[4] as u32) << 16
|
||||||
|
| (buffer[5] as u32) << 24) as usize;
|
||||||
|
let needed = size + 6;
|
||||||
|
if buffer.len() < needed {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
let mut packet = buffer.split_to(needed);
|
||||||
|
// advance the buffer by the header, leaving only the raw data.
|
||||||
|
packet.advance(6);
|
||||||
|
match IdmTransportPacket::decode(packet) {
|
||||||
|
Ok(packet) => {
|
||||||
|
let _ =
|
||||||
|
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds)
|
||||||
|
.await?;
|
||||||
|
let guard = self.feeds.lock().await;
|
||||||
|
if let Some(feed) = guard.get(&domid) {
|
||||||
|
let _ = feed.try_send(packet.clone());
|
||||||
|
}
|
||||||
|
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
|
||||||
|
from: domid,
|
||||||
|
to: 0,
|
||||||
|
packet,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(packet) => {
|
||||||
|
warn!("received invalid packet from domain {}: {}", domid, packet);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let mut clients = self.clients.lock().await;
|
||||||
|
let mut feeds = self.feeds.lock().await;
|
||||||
|
clients.remove(&domid);
|
||||||
|
feeds.remove(&domid);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn tx_packet(&mut self, domid: u32, packet: IdmTransportPacket) -> Result<()> {
|
||||||
|
let data = packet.encode_to_vec();
|
||||||
|
let mut buffer = vec![0u8; 6];
|
||||||
|
let length = data.len() as u32;
|
||||||
|
// magic bytes
|
||||||
|
buffer[0] = 0xff;
|
||||||
|
buffer[1] = 0xff;
|
||||||
|
// little endian u32 for message size
|
||||||
|
buffer[2] = length as u8;
|
||||||
|
buffer[3] = (length << 8) as u8;
|
||||||
|
buffer[4] = (length << 16) as u8;
|
||||||
|
buffer[5] = (length << 24) as u8;
|
||||||
|
buffer.extend_from_slice(&data);
|
||||||
|
self.tx_raw_sender.send((domid, buffer)).await?;
|
||||||
|
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
|
||||||
|
from: 0,
|
||||||
|
to: domid,
|
||||||
|
packet,
|
||||||
|
});
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
async fn process(&mut self, buffers: &mut HashMap<u32, BytesMut>) -> Result<()> {
|
async fn process(&mut self, buffers: &mut HashMap<u32, BytesMut>) -> Result<()> {
|
||||||
loop {
|
loop {
|
||||||
select! {
|
select! {
|
||||||
x = self.rx_receiver.recv() => match x {
|
x = self.rx_receiver.recv() => match x {
|
||||||
Some((domid, data)) => {
|
Some((domid, data)) => {
|
||||||
if let Some(data) = data {
|
self.process_rx_packet(domid, data, buffers).await?;
|
||||||
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
|
|
||||||
buffer.extend_from_slice(&data);
|
|
||||||
loop {
|
|
||||||
if buffer.len() < 6 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if buffer[0] != 0xff || buffer[1] != 0xff {
|
|
||||||
buffer.clear();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
let size = (buffer[2] as u32 | (buffer[3] as u32) << 8 | (buffer[4] as u32) << 16 | (buffer[5] as u32) << 24) as usize;
|
|
||||||
let needed = size + 6;
|
|
||||||
if buffer.len() < needed {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
let mut packet = buffer.split_to(needed);
|
|
||||||
packet.advance(6);
|
|
||||||
match IdmTransportPacket::decode(packet) {
|
|
||||||
Ok(packet) => {
|
|
||||||
let _ = client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await?;
|
|
||||||
let guard = self.feeds.lock().await;
|
|
||||||
if let Some(feed) = guard.get(&domid) {
|
|
||||||
let _ = feed.try_send(packet.clone());
|
|
||||||
}
|
|
||||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: domid, to: 0, packet });
|
|
||||||
}
|
|
||||||
|
|
||||||
Err(packet) => {
|
|
||||||
warn!("received invalid packet from domain {}: {}", domid, packet);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
let mut clients = self.clients.lock().await;
|
|
||||||
let mut feeds = self.feeds.lock().await;
|
|
||||||
clients.remove(&domid);
|
|
||||||
feeds.remove(&domid);
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
|
|
||||||
None => {
|
None => {
|
||||||
@ -182,25 +234,14 @@ impl DaemonIdm {
|
|||||||
},
|
},
|
||||||
x = self.tx_receiver.recv() => match x {
|
x = self.tx_receiver.recv() => match x {
|
||||||
Some((domid, packet)) => {
|
Some((domid, packet)) => {
|
||||||
let data = packet.encode_to_vec();
|
self.tx_packet(domid, packet).await?;
|
||||||
let mut buffer = vec![0u8; 6];
|
|
||||||
let length = data.len() as u32;
|
|
||||||
buffer[0] = 0xff;
|
|
||||||
buffer[1] = 0xff;
|
|
||||||
buffer[2] = length as u8;
|
|
||||||
buffer[3] = (length << 8) as u8;
|
|
||||||
buffer[4] = (length << 16) as u8;
|
|
||||||
buffer[5] = (length << 24) as u8;
|
|
||||||
buffer.extend_from_slice(&data);
|
|
||||||
self.tx_raw_sender.send((domid, buffer)).await?;
|
|
||||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: 0, to: domid, packet });
|
|
||||||
},
|
},
|
||||||
|
|
||||||
None => {
|
None => {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -249,9 +290,9 @@ pub struct IdmDaemonBackend {
|
|||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
impl IdmBackend for IdmDaemonBackend {
|
impl IdmBackend for IdmDaemonBackend {
|
||||||
async fn recv(&mut self) -> Result<IdmTransportPacket> {
|
async fn recv(&mut self) -> Result<Vec<IdmTransportPacket>> {
|
||||||
if let Some(packet) = self.rx_receiver.recv().await {
|
if let Some(packet) = self.rx_receiver.recv().await {
|
||||||
Ok(packet)
|
Ok(vec![packet])
|
||||||
} else {
|
} else {
|
||||||
Err(anyhow!("idm receive channel closed"))
|
Err(anyhow!("idm receive channel closed"))
|
||||||
}
|
}
|
||||||
|
@ -1,18 +1,23 @@
|
|||||||
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
|
use crate::db::network::NetworkReservationStore;
|
||||||
|
use crate::db::zone::ZoneStore;
|
||||||
|
use crate::db::KrataDatabase;
|
||||||
|
use crate::network::assignment::NetworkAssignment;
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use config::DaemonConfig;
|
use config::DaemonConfig;
|
||||||
use console::{DaemonConsole, DaemonConsoleHandle};
|
use console::{DaemonConsole, DaemonConsoleHandle};
|
||||||
use control::DaemonControlService;
|
use control::DaemonControlService;
|
||||||
use db::ZoneStore;
|
|
||||||
use devices::DaemonDeviceManager;
|
use devices::DaemonDeviceManager;
|
||||||
use event::{DaemonEventContext, DaemonEventGenerator};
|
use event::{DaemonEventContext, DaemonEventGenerator};
|
||||||
use idm::{DaemonIdm, DaemonIdmHandle};
|
use idm::{DaemonIdm, DaemonIdmHandle};
|
||||||
|
use ipnetwork::{Ipv4Network, Ipv6Network};
|
||||||
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
||||||
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
||||||
use kratart::Runtime;
|
use kratart::Runtime;
|
||||||
use log::info;
|
use log::{debug, info};
|
||||||
use reconcile::zone::ZoneReconciler;
|
use reconcile::zone::ZoneReconciler;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::time::Duration;
|
||||||
|
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
fs,
|
fs,
|
||||||
net::UnixListener,
|
net::UnixListener,
|
||||||
@ -33,6 +38,7 @@ pub mod devices;
|
|||||||
pub mod event;
|
pub mod event;
|
||||||
pub mod idm;
|
pub mod idm;
|
||||||
pub mod metrics;
|
pub mod metrics;
|
||||||
|
pub mod network;
|
||||||
pub mod oci;
|
pub mod oci;
|
||||||
pub mod reconcile;
|
pub mod reconcile;
|
||||||
pub mod zlt;
|
pub mod zlt;
|
||||||
@ -40,9 +46,10 @@ pub mod zlt;
|
|||||||
pub struct Daemon {
|
pub struct Daemon {
|
||||||
store: String,
|
store: String,
|
||||||
_config: Arc<DaemonConfig>,
|
_config: Arc<DaemonConfig>,
|
||||||
glt: ZoneLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
zones: ZoneStore,
|
zones: ZoneStore,
|
||||||
|
network: NetworkAssignment,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
zone_reconciler_task: JoinHandle<()>,
|
zone_reconciler_task: JoinHandle<()>,
|
||||||
zone_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
@ -58,18 +65,22 @@ const ZONE_RECONCILER_QUEUE_LEN: usize = 1000;
|
|||||||
impl Daemon {
|
impl Daemon {
|
||||||
pub async fn new(store: String) -> Result<Self> {
|
pub async fn new(store: String) -> Result<Self> {
|
||||||
let store_dir = PathBuf::from(store.clone());
|
let store_dir = PathBuf::from(store.clone());
|
||||||
|
debug!("loading configuration");
|
||||||
let mut config_path = store_dir.clone();
|
let mut config_path = store_dir.clone();
|
||||||
config_path.push("config.toml");
|
config_path.push("config.toml");
|
||||||
|
|
||||||
let config = DaemonConfig::load(&config_path).await?;
|
let config = DaemonConfig::load(&config_path).await?;
|
||||||
let config = Arc::new(config);
|
let config = Arc::new(config);
|
||||||
|
debug!("initializing device manager");
|
||||||
let devices = DaemonDeviceManager::new(config.clone());
|
let devices = DaemonDeviceManager::new(config.clone());
|
||||||
|
|
||||||
|
debug!("validating image cache directory");
|
||||||
let mut image_cache_dir = store_dir.clone();
|
let mut image_cache_dir = store_dir.clone();
|
||||||
image_cache_dir.push("cache");
|
image_cache_dir.push("cache");
|
||||||
image_cache_dir.push("image");
|
image_cache_dir.push("image");
|
||||||
fs::create_dir_all(&image_cache_dir).await?;
|
fs::create_dir_all(&image_cache_dir).await?;
|
||||||
|
|
||||||
|
debug!("loading zone0 uuid");
|
||||||
let mut host_uuid_path = store_dir.clone();
|
let mut host_uuid_path = store_dir.clone();
|
||||||
host_uuid_path.push("host.uuid");
|
host_uuid_path.push("host.uuid");
|
||||||
let host_uuid = if host_uuid_path.is_file() {
|
let host_uuid = if host_uuid_path.is_file() {
|
||||||
@ -89,29 +100,46 @@ impl Daemon {
|
|||||||
generated
|
generated
|
||||||
};
|
};
|
||||||
|
|
||||||
|
debug!("validating zone asset directories");
|
||||||
let initrd_path = detect_zone_path(&store, "initrd")?;
|
let initrd_path = detect_zone_path(&store, "initrd")?;
|
||||||
let kernel_path = detect_zone_path(&store, "kernel")?;
|
let kernel_path = detect_zone_path(&store, "kernel")?;
|
||||||
let addons_path = detect_zone_path(&store, "addons.squashfs")?;
|
let addons_path = detect_zone_path(&store, "addons.squashfs")?;
|
||||||
|
|
||||||
|
debug!("initializing caches and hydrating zone state");
|
||||||
let seed = config.oci.seed.clone().map(PathBuf::from);
|
let seed = config.oci.seed.clone().map(PathBuf::from);
|
||||||
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
|
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
|
||||||
let runtime = Runtime::new(host_uuid).await?;
|
debug!("initializing core runtime");
|
||||||
let glt = ZoneLookupTable::new(0, host_uuid);
|
let runtime = Runtime::new().await?;
|
||||||
let zones_db_path = format!("{}/zones.db", store);
|
let zlt = ZoneLookupTable::new(0, host_uuid);
|
||||||
let zones = ZoneStore::open(&PathBuf::from(zones_db_path))?;
|
let db_path = format!("{}/krata.db", store);
|
||||||
|
let database = KrataDatabase::open(Path::new(&db_path))?;
|
||||||
|
let zones = ZoneStore::open(database.clone())?;
|
||||||
let (zone_reconciler_notify, zone_reconciler_receiver) =
|
let (zone_reconciler_notify, zone_reconciler_receiver) =
|
||||||
channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
|
channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
|
||||||
let idm = DaemonIdm::new(glt.clone()).await?;
|
debug!("starting IDM service");
|
||||||
|
let idm = DaemonIdm::new(zlt.clone()).await?;
|
||||||
let idm = idm.launch().await?;
|
let idm = idm.launch().await?;
|
||||||
let console = DaemonConsole::new(glt.clone()).await?;
|
debug!("initializing console interfaces");
|
||||||
|
let console = DaemonConsole::new(zlt.clone()).await?;
|
||||||
let console = console.launch().await?;
|
let console = console.launch().await?;
|
||||||
let (events, generator) =
|
let (events, generator) =
|
||||||
DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
|
DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
|
||||||
.await?;
|
.await?;
|
||||||
let runtime_for_reconciler = runtime.dupe().await?;
|
let runtime_for_reconciler = runtime.dupe().await?;
|
||||||
|
let ipv4_network = Ipv4Network::from_str(&config.network.ipv4.subnet)?;
|
||||||
|
let ipv6_network = Ipv6Network::from_str(&config.network.ipv6.subnet)?;
|
||||||
|
let network_reservation_store = NetworkReservationStore::open(database)?;
|
||||||
|
let network = NetworkAssignment::new(
|
||||||
|
host_uuid,
|
||||||
|
ipv4_network,
|
||||||
|
ipv6_network,
|
||||||
|
network_reservation_store,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
debug!("initializing zone reconciler");
|
||||||
let zone_reconciler = ZoneReconciler::new(
|
let zone_reconciler = ZoneReconciler::new(
|
||||||
devices.clone(),
|
devices.clone(),
|
||||||
glt.clone(),
|
zlt.clone(),
|
||||||
zones.clone(),
|
zones.clone(),
|
||||||
events.clone(),
|
events.clone(),
|
||||||
runtime_for_reconciler,
|
runtime_for_reconciler,
|
||||||
@ -120,6 +148,8 @@ impl Daemon {
|
|||||||
kernel_path,
|
kernel_path,
|
||||||
initrd_path,
|
initrd_path,
|
||||||
addons_path,
|
addons_path,
|
||||||
|
network.clone(),
|
||||||
|
config.clone(),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
|
let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
|
||||||
@ -127,19 +157,21 @@ impl Daemon {
|
|||||||
|
|
||||||
// TODO: Create a way of abstracting early init tasks in kratad.
|
// TODO: Create a way of abstracting early init tasks in kratad.
|
||||||
// TODO: Make initial power management policy configurable.
|
// TODO: Make initial power management policy configurable.
|
||||||
// FIXME: Power management hypercalls fail when running as an L1 hypervisor.
|
let power = runtime.power_management_context().await?;
|
||||||
// let power = runtime.power_management_context().await?;
|
power.set_smt_policy(true).await?;
|
||||||
// power.set_smt_policy(true).await?;
|
power
|
||||||
// power
|
.set_scheduler_policy("performance".to_string())
|
||||||
// .set_scheduler_policy("performance".to_string())
|
.await?;
|
||||||
// .await?;
|
info!("power management initialized");
|
||||||
|
|
||||||
|
info!("krata daemon initialized");
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
store,
|
store,
|
||||||
_config: config,
|
_config: config,
|
||||||
glt,
|
zlt,
|
||||||
devices,
|
devices,
|
||||||
zones,
|
zones,
|
||||||
|
network,
|
||||||
events,
|
events,
|
||||||
zone_reconciler_task,
|
zone_reconciler_task,
|
||||||
zone_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
@ -152,13 +184,15 @@ impl Daemon {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
|
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
|
||||||
|
debug!("starting control service");
|
||||||
let control_service = DaemonControlService::new(
|
let control_service = DaemonControlService::new(
|
||||||
self.glt.clone(),
|
self.zlt.clone(),
|
||||||
self.devices.clone(),
|
self.devices.clone(),
|
||||||
self.events.clone(),
|
self.events.clone(),
|
||||||
self.console.clone(),
|
self.console.clone(),
|
||||||
self.idm.clone(),
|
self.idm.clone(),
|
||||||
self.zones.clone(),
|
self.zones.clone(),
|
||||||
|
self.network.clone(),
|
||||||
self.zone_reconciler_notify.clone(),
|
self.zone_reconciler_notify.clone(),
|
||||||
self.packer.clone(),
|
self.packer.clone(),
|
||||||
self.runtime.clone(),
|
self.runtime.clone(),
|
||||||
@ -181,6 +215,8 @@ impl Daemon {
|
|||||||
server = server.tls_config(tls_config)?;
|
server = server.tls_config(tls_config)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
server = server.http2_keepalive_interval(Some(Duration::from_secs(10)));
|
||||||
|
|
||||||
let server = server.add_service(ControlServiceServer::new(control_service));
|
let server = server.add_service(ControlServiceServer::new(control_service));
|
||||||
info!("listening on address {}", addr);
|
info!("listening on address {}", addr);
|
||||||
match addr {
|
match addr {
|
||||||
|
204
crates/daemon/src/network/assignment.rs
Normal file
204
crates/daemon/src/network/assignment.rs
Normal file
@ -0,0 +1,204 @@
|
|||||||
|
use advmac::MacAddr6;
|
||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use ipnetwork::{Ipv4Network, Ipv6Network};
|
||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
net::{Ipv4Addr, Ipv6Addr},
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use crate::db::network::{NetworkReservation, NetworkReservationStore};
|
||||||
|
|
||||||
|
#[derive(Default, Clone)]
|
||||||
|
pub struct NetworkAssignmentState {
|
||||||
|
pub ipv4: HashMap<Ipv4Addr, NetworkReservation>,
|
||||||
|
pub ipv6: HashMap<Ipv6Addr, NetworkReservation>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct NetworkAssignment {
|
||||||
|
ipv4_network: Ipv4Network,
|
||||||
|
ipv6_network: Ipv6Network,
|
||||||
|
gateway_ipv4: Ipv4Addr,
|
||||||
|
gateway_ipv6: Ipv6Addr,
|
||||||
|
gateway_mac: MacAddr6,
|
||||||
|
store: NetworkReservationStore,
|
||||||
|
state: Arc<RwLock<NetworkAssignmentState>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkAssignment {
|
||||||
|
pub async fn new(
|
||||||
|
host_uuid: Uuid,
|
||||||
|
ipv4_network: Ipv4Network,
|
||||||
|
ipv6_network: Ipv6Network,
|
||||||
|
store: NetworkReservationStore,
|
||||||
|
) -> Result<Self> {
|
||||||
|
let mut state = NetworkAssignment::fetch_current_state(&store).await?;
|
||||||
|
let gateway_reservation = if let Some(reservation) = store.read(Uuid::nil()).await? {
|
||||||
|
reservation
|
||||||
|
} else {
|
||||||
|
NetworkAssignment::allocate(
|
||||||
|
&mut state,
|
||||||
|
&store,
|
||||||
|
Uuid::nil(),
|
||||||
|
ipv4_network,
|
||||||
|
ipv6_network,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
};
|
||||||
|
|
||||||
|
if store.read(host_uuid).await?.is_none() {
|
||||||
|
let _ = NetworkAssignment::allocate(
|
||||||
|
&mut state,
|
||||||
|
&store,
|
||||||
|
host_uuid,
|
||||||
|
ipv4_network,
|
||||||
|
ipv6_network,
|
||||||
|
Some(gateway_reservation.gateway_ipv4),
|
||||||
|
Some(gateway_reservation.gateway_ipv6),
|
||||||
|
Some(gateway_reservation.gateway_mac),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let assignment = NetworkAssignment {
|
||||||
|
ipv4_network,
|
||||||
|
ipv6_network,
|
||||||
|
gateway_ipv4: gateway_reservation.ipv4,
|
||||||
|
gateway_ipv6: gateway_reservation.ipv6,
|
||||||
|
gateway_mac: gateway_reservation.mac,
|
||||||
|
store,
|
||||||
|
state: Arc::new(RwLock::new(state)),
|
||||||
|
};
|
||||||
|
Ok(assignment)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn fetch_current_state(
|
||||||
|
store: &NetworkReservationStore,
|
||||||
|
) -> Result<NetworkAssignmentState> {
|
||||||
|
let reservations = store.list().await?;
|
||||||
|
let mut state = NetworkAssignmentState::default();
|
||||||
|
for reservation in reservations.values() {
|
||||||
|
state.ipv4.insert(reservation.ipv4, reservation.clone());
|
||||||
|
state.ipv6.insert(reservation.ipv6, reservation.clone());
|
||||||
|
}
|
||||||
|
Ok(state)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
async fn allocate(
|
||||||
|
state: &mut NetworkAssignmentState,
|
||||||
|
store: &NetworkReservationStore,
|
||||||
|
uuid: Uuid,
|
||||||
|
ipv4_network: Ipv4Network,
|
||||||
|
ipv6_network: Ipv6Network,
|
||||||
|
gateway_ipv4: Option<Ipv4Addr>,
|
||||||
|
gateway_ipv6: Option<Ipv6Addr>,
|
||||||
|
gateway_mac: Option<MacAddr6>,
|
||||||
|
) -> Result<NetworkReservation> {
|
||||||
|
let found_ipv4: Option<Ipv4Addr> = ipv4_network
|
||||||
|
.iter()
|
||||||
|
.filter(|ip| {
|
||||||
|
ip.is_private() && !(ip.is_loopback() || ip.is_multicast() || ip.is_broadcast())
|
||||||
|
})
|
||||||
|
.filter(|ip| {
|
||||||
|
let last = ip.octets()[3];
|
||||||
|
// filter for IPs ending in .1 to .250 because .250+ can have special meaning
|
||||||
|
(1..250).contains(&last)
|
||||||
|
})
|
||||||
|
.find(|ip| !state.ipv4.contains_key(ip));
|
||||||
|
|
||||||
|
let found_ipv6: Option<Ipv6Addr> = ipv6_network
|
||||||
|
.iter()
|
||||||
|
.filter(|ip| !ip.is_loopback() && !ip.is_multicast())
|
||||||
|
.filter(|ip| {
|
||||||
|
let last = ip.octets()[15];
|
||||||
|
last > 0
|
||||||
|
})
|
||||||
|
.find(|ip| !state.ipv6.contains_key(ip));
|
||||||
|
|
||||||
|
let Some(ipv4) = found_ipv4 else {
|
||||||
|
return Err(anyhow!(
|
||||||
|
"unable to allocate ipv4 address, assigned network is exhausted"
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some(ipv6) = found_ipv6 else {
|
||||||
|
return Err(anyhow!(
|
||||||
|
"unable to allocate ipv6 address, assigned network is exhausted"
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut mac = MacAddr6::random();
|
||||||
|
mac.set_local(true);
|
||||||
|
mac.set_multicast(false);
|
||||||
|
|
||||||
|
let reservation = NetworkReservation {
|
||||||
|
uuid: uuid.to_string(),
|
||||||
|
ipv4,
|
||||||
|
ipv6,
|
||||||
|
mac,
|
||||||
|
ipv4_prefix: ipv4_network.prefix(),
|
||||||
|
ipv6_prefix: ipv6_network.prefix(),
|
||||||
|
gateway_ipv4: gateway_ipv4.unwrap_or(ipv4),
|
||||||
|
gateway_ipv6: gateway_ipv6.unwrap_or(ipv6),
|
||||||
|
gateway_mac: gateway_mac.unwrap_or(mac),
|
||||||
|
};
|
||||||
|
state.ipv4.insert(ipv4, reservation.clone());
|
||||||
|
state.ipv6.insert(ipv6, reservation.clone());
|
||||||
|
store.update(uuid, reservation.clone()).await?;
|
||||||
|
Ok(reservation)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn assign(&self, uuid: Uuid) -> Result<NetworkReservation> {
|
||||||
|
let mut state = self.state.write().await;
|
||||||
|
let reservation = NetworkAssignment::allocate(
|
||||||
|
&mut state,
|
||||||
|
&self.store,
|
||||||
|
uuid,
|
||||||
|
self.ipv4_network,
|
||||||
|
self.ipv6_network,
|
||||||
|
Some(self.gateway_ipv4),
|
||||||
|
Some(self.gateway_ipv6),
|
||||||
|
Some(self.gateway_mac),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
Ok(reservation)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn recall(&self, uuid: Uuid) -> Result<()> {
|
||||||
|
let mut state = self.state.write().await;
|
||||||
|
self.store.remove(uuid).await?;
|
||||||
|
state
|
||||||
|
.ipv4
|
||||||
|
.retain(|_, reservation| reservation.uuid != uuid.to_string());
|
||||||
|
state
|
||||||
|
.ipv6
|
||||||
|
.retain(|_, reservation| reservation.uuid != uuid.to_string());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn retrieve(&self, uuid: Uuid) -> Result<Option<NetworkReservation>> {
|
||||||
|
self.store.read(uuid).await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn reload(&self) -> Result<()> {
|
||||||
|
let mut state = self.state.write().await;
|
||||||
|
let intermediate = NetworkAssignment::fetch_current_state(&self.store).await?;
|
||||||
|
*state = intermediate;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn read(&self) -> Result<NetworkAssignmentState> {
|
||||||
|
Ok(self.state.read().await.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn read_reservations(&self) -> Result<HashMap<Uuid, NetworkReservation>> {
|
||||||
|
self.store.list().await
|
||||||
|
}
|
||||||
|
}
|
1
crates/daemon/src/network/mod.rs
Normal file
1
crates/daemon/src/network/mod.rs
Normal file
@ -0,0 +1 @@
|
|||||||
|
pub mod assignment;
|
@ -1,41 +1,41 @@
|
|||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use futures::StreamExt;
|
||||||
|
use krata::launchcfg::LaunchPackedFormat;
|
||||||
|
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
|
||||||
|
use krata::v1::common::{ZoneOciImageSpec, ZoneResourceStatus};
|
||||||
|
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
||||||
|
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy, ZoneLaunchNetwork};
|
||||||
|
use kratart::{launch::ZoneLaunchRequest, Runtime};
|
||||||
|
use log::info;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
|
|
||||||
use anyhow::{anyhow, Result};
|
use crate::config::{DaemonConfig, DaemonPciDeviceRdmReservePolicy};
|
||||||
use futures::StreamExt;
|
|
||||||
use krata::launchcfg::LaunchPackedFormat;
|
|
||||||
use krata::v1::common::ZoneOciImageSpec;
|
|
||||||
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
|
|
||||||
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
|
||||||
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
|
|
||||||
use kratart::{launch::ZoneLaunchRequest, Runtime};
|
|
||||||
use log::info;
|
|
||||||
|
|
||||||
use crate::config::DaemonPciDeviceRdmReservePolicy;
|
|
||||||
use crate::devices::DaemonDeviceManager;
|
use crate::devices::DaemonDeviceManager;
|
||||||
use crate::{
|
use crate::network::assignment::NetworkAssignment;
|
||||||
reconcile::zone::{zoneinfo_to_networkstate, ZoneReconcilerResult},
|
use crate::reconcile::zone::network_reservation_to_network_status;
|
||||||
zlt::ZoneLookupTable,
|
use crate::{reconcile::zone::ZoneReconcilerResult, zlt::ZoneLookupTable};
|
||||||
};
|
|
||||||
use krata::v1::common::zone_image_spec::Image;
|
use krata::v1::common::zone_image_spec::Image;
|
||||||
use tokio::fs::{self, File};
|
use tokio::fs::{self, File};
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio_tar::Archive;
|
use tokio_tar::Archive;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
pub struct ZoneStarter<'a> {
|
pub struct ZoneCreator<'a> {
|
||||||
pub devices: &'a DaemonDeviceManager,
|
pub devices: &'a DaemonDeviceManager,
|
||||||
pub kernel_path: &'a Path,
|
pub kernel_path: &'a Path,
|
||||||
pub initrd_path: &'a Path,
|
pub initrd_path: &'a Path,
|
||||||
pub addons_path: &'a Path,
|
pub addons_path: &'a Path,
|
||||||
pub packer: &'a OciPackerService,
|
pub packer: &'a OciPackerService,
|
||||||
pub glt: &'a ZoneLookupTable,
|
pub network_assignment: &'a NetworkAssignment,
|
||||||
|
pub zlt: &'a ZoneLookupTable,
|
||||||
pub runtime: &'a Runtime,
|
pub runtime: &'a Runtime,
|
||||||
|
pub config: &'a DaemonConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ZoneStarter<'_> {
|
impl ZoneCreator<'_> {
|
||||||
pub async fn oci_spec_tar_read_file(
|
pub async fn oci_spec_tar_read_file(
|
||||||
&self,
|
&self,
|
||||||
file: &Path,
|
file: &Path,
|
||||||
@ -75,8 +75,8 @@ impl ZoneStarter<'_> {
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
pub async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
let Some(ref spec) = zone.spec else {
|
let Some(ref mut spec) = zone.spec else {
|
||||||
return Err(anyhow!("zone spec not specified"));
|
return Err(anyhow!("zone spec not specified"));
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -174,10 +174,29 @@ impl ZoneStarter<'_> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let reservation = self.network_assignment.assign(uuid).await?;
|
||||||
|
|
||||||
|
let mut initial_resources = spec.initial_resources.unwrap_or_default();
|
||||||
|
if initial_resources.target_cpus < 1 {
|
||||||
|
initial_resources.target_cpus = 1;
|
||||||
|
}
|
||||||
|
if initial_resources.target_cpus > initial_resources.max_cpus {
|
||||||
|
initial_resources.max_cpus = initial_resources.target_cpus;
|
||||||
|
}
|
||||||
|
spec.initial_resources = Some(initial_resources);
|
||||||
|
let kernel_options = spec.kernel_options.clone().unwrap_or_default();
|
||||||
let info = self
|
let info = self
|
||||||
.runtime
|
.runtime
|
||||||
.launch(ZoneLaunchRequest {
|
.launch(ZoneLaunchRequest {
|
||||||
format: LaunchPackedFormat::Squashfs,
|
format: match image.format {
|
||||||
|
OciPackedFormat::Squashfs => LaunchPackedFormat::Squashfs,
|
||||||
|
OciPackedFormat::Erofs => LaunchPackedFormat::Erofs,
|
||||||
|
_ => {
|
||||||
|
return Err(anyhow!(
|
||||||
|
"oci image is in an invalid format, which isn't compatible with launch"
|
||||||
|
));
|
||||||
|
}
|
||||||
|
},
|
||||||
uuid: Some(uuid),
|
uuid: Some(uuid),
|
||||||
name: if spec.name.is_empty() {
|
name: if spec.name.is_empty() {
|
||||||
None
|
None
|
||||||
@ -187,8 +206,10 @@ impl ZoneStarter<'_> {
|
|||||||
image,
|
image,
|
||||||
kernel,
|
kernel,
|
||||||
initrd,
|
initrd,
|
||||||
vcpus: spec.vcpus,
|
target_cpus: initial_resources.target_cpus,
|
||||||
mem: spec.mem,
|
max_cpus: initial_resources.max_cpus,
|
||||||
|
max_memory: initial_resources.max_memory,
|
||||||
|
target_memory: initial_resources.target_memory,
|
||||||
pcis,
|
pcis,
|
||||||
env: task
|
env: task
|
||||||
.environment
|
.environment
|
||||||
@ -196,18 +217,32 @@ impl ZoneStarter<'_> {
|
|||||||
.map(|x| (x.key.clone(), x.value.clone()))
|
.map(|x| (x.key.clone(), x.value.clone()))
|
||||||
.collect::<HashMap<_, _>>(),
|
.collect::<HashMap<_, _>>(),
|
||||||
run: empty_vec_optional(task.command.clone()),
|
run: empty_vec_optional(task.command.clone()),
|
||||||
debug: false,
|
kernel_verbose: kernel_options.verbose,
|
||||||
|
kernel_cmdline_append: kernel_options.cmdline_append,
|
||||||
addons_image: Some(self.addons_path.to_path_buf()),
|
addons_image: Some(self.addons_path.to_path_buf()),
|
||||||
|
network: ZoneLaunchNetwork {
|
||||||
|
ipv4: reservation.ipv4.to_string(),
|
||||||
|
ipv4_prefix: reservation.ipv4_prefix,
|
||||||
|
ipv6: reservation.ipv6.to_string(),
|
||||||
|
ipv6_prefix: reservation.ipv6_prefix,
|
||||||
|
gateway_ipv4: reservation.gateway_ipv4.to_string(),
|
||||||
|
gateway_ipv6: reservation.gateway_ipv6.to_string(),
|
||||||
|
zone_mac: reservation.mac,
|
||||||
|
nameservers: self.config.network.nameservers.clone(),
|
||||||
|
},
|
||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
self.glt.associate(uuid, info.domid).await;
|
self.zlt.associate(uuid, info.domid).await;
|
||||||
info!("started zone {}", uuid);
|
info!("created zone {}", uuid);
|
||||||
zone.state = Some(ZoneState {
|
zone.status = Some(ZoneStatus {
|
||||||
status: ZoneStatus::Started.into(),
|
state: ZoneState::Created.into(),
|
||||||
network: Some(zoneinfo_to_networkstate(&info)),
|
network_status: Some(network_reservation_to_network_status(&reservation)),
|
||||||
exit_info: None,
|
exit_status: None,
|
||||||
error_info: None,
|
error_status: None,
|
||||||
host: self.glt.host_uuid().to_string(),
|
resource_status: Some(ZoneResourceStatus {
|
||||||
|
active_resources: Some(initial_resources),
|
||||||
|
}),
|
||||||
|
host: self.zlt.host_uuid().to_string(),
|
||||||
domid: info.domid,
|
domid: info.domid,
|
||||||
});
|
});
|
||||||
success.store(true, Ordering::Release);
|
success.store(true, Ordering::Release);
|
@ -5,35 +5,36 @@ use std::{
|
|||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use self::create::ZoneCreator;
|
||||||
|
use crate::config::DaemonConfig;
|
||||||
|
use crate::db::network::NetworkReservation;
|
||||||
|
use crate::network::assignment::NetworkAssignment;
|
||||||
|
use crate::{
|
||||||
|
db::zone::ZoneStore,
|
||||||
|
devices::DaemonDeviceManager,
|
||||||
|
event::{DaemonEvent, DaemonEventContext},
|
||||||
|
zlt::ZoneLookupTable,
|
||||||
|
};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use krata::v1::{
|
use krata::v1::{
|
||||||
common::{Zone, ZoneErrorInfo, ZoneExitInfo, ZoneNetworkState, ZoneState, ZoneStatus},
|
common::{Zone, ZoneErrorStatus, ZoneExitStatus, ZoneNetworkStatus, ZoneState, ZoneStatus},
|
||||||
control::ZoneChangedEvent,
|
control::ZoneChangedEvent,
|
||||||
};
|
};
|
||||||
use krataoci::packer::service::OciPackerService;
|
use krataoci::packer::service::OciPackerService;
|
||||||
use kratart::{Runtime, ZoneInfo};
|
use kratart::Runtime;
|
||||||
use log::{error, info, trace, warn};
|
use log::{error, info, trace, warn};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
select,
|
select,
|
||||||
sync::{
|
sync::{
|
||||||
mpsc::{channel, Receiver, Sender},
|
mpsc::{channel, Receiver, Sender},
|
||||||
Mutex, RwLock,
|
RwLock,
|
||||||
},
|
},
|
||||||
task::JoinHandle,
|
task::JoinHandle,
|
||||||
time::sleep,
|
time::sleep,
|
||||||
};
|
};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::{
|
mod create;
|
||||||
db::ZoneStore,
|
|
||||||
devices::DaemonDeviceManager,
|
|
||||||
event::{DaemonEvent, DaemonEventContext},
|
|
||||||
zlt::ZoneLookupTable,
|
|
||||||
};
|
|
||||||
|
|
||||||
use self::start::ZoneStarter;
|
|
||||||
|
|
||||||
mod start;
|
|
||||||
|
|
||||||
const PARALLEL_LIMIT: u32 = 5;
|
const PARALLEL_LIMIT: u32 = 5;
|
||||||
|
|
||||||
@ -44,16 +45,9 @@ enum ZoneReconcilerResult {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct ZoneReconcilerEntry {
|
struct ZoneReconcilerEntry {
|
||||||
task: JoinHandle<()>,
|
|
||||||
sender: Sender<()>,
|
sender: Sender<()>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for ZoneReconcilerEntry {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.task.abort();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct ZoneReconciler {
|
pub struct ZoneReconciler {
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
@ -65,9 +59,11 @@ pub struct ZoneReconciler {
|
|||||||
kernel_path: PathBuf,
|
kernel_path: PathBuf,
|
||||||
initrd_path: PathBuf,
|
initrd_path: PathBuf,
|
||||||
addons_path: PathBuf,
|
addons_path: PathBuf,
|
||||||
tasks: Arc<Mutex<HashMap<Uuid, ZoneReconcilerEntry>>>,
|
tasks: Arc<RwLock<HashMap<Uuid, ZoneReconcilerEntry>>>,
|
||||||
zone_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
zone_reconcile_lock: Arc<RwLock<()>>,
|
zone_reconcile_lock: Arc<RwLock<()>>,
|
||||||
|
ip_assignment: NetworkAssignment,
|
||||||
|
config: Arc<DaemonConfig>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ZoneReconciler {
|
impl ZoneReconciler {
|
||||||
@ -83,6 +79,8 @@ impl ZoneReconciler {
|
|||||||
kernel_path: PathBuf,
|
kernel_path: PathBuf,
|
||||||
initrd_path: PathBuf,
|
initrd_path: PathBuf,
|
||||||
modules_path: PathBuf,
|
modules_path: PathBuf,
|
||||||
|
ip_assignment: NetworkAssignment,
|
||||||
|
config: Arc<DaemonConfig>,
|
||||||
) -> Result<Self> {
|
) -> Result<Self> {
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
devices,
|
devices,
|
||||||
@ -94,9 +92,11 @@ impl ZoneReconciler {
|
|||||||
kernel_path,
|
kernel_path,
|
||||||
initrd_path,
|
initrd_path,
|
||||||
addons_path: modules_path,
|
addons_path: modules_path,
|
||||||
tasks: Arc::new(Mutex::new(HashMap::new())),
|
tasks: Arc::new(RwLock::new(HashMap::new())),
|
||||||
zone_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
||||||
|
ip_assignment,
|
||||||
|
config,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -118,7 +118,7 @@ impl ZoneReconciler {
|
|||||||
error!("failed to start zone reconciler task {}: {}", uuid, error);
|
error!("failed to start zone reconciler task {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
|
|
||||||
let map = self.tasks.lock().await;
|
let map = self.tasks.read().await;
|
||||||
if let Some(entry) = map.get(&uuid) {
|
if let Some(entry) = map.get(&uuid) {
|
||||||
if let Err(error) = entry.sender.send(()).await {
|
if let Err(error) = entry.sender.send(()).await {
|
||||||
error!("failed to notify zone reconciler task {}: {}", uuid, error);
|
error!("failed to notify zone reconciler task {}: {}", uuid, error);
|
||||||
@ -132,7 +132,7 @@ impl ZoneReconciler {
|
|||||||
error!("runtime reconciler failed: {}", error);
|
error!("runtime reconciler failed: {}", error);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
@ -166,21 +166,21 @@ impl ZoneReconciler {
|
|||||||
let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
|
let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
|
||||||
match runtime_zone {
|
match runtime_zone {
|
||||||
None => {
|
None => {
|
||||||
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
|
||||||
if state.status() == ZoneStatus::Started {
|
if status.state() == ZoneState::Created {
|
||||||
state.status = ZoneStatus::Starting.into();
|
status.state = ZoneState::Creating.into();
|
||||||
}
|
}
|
||||||
stored_zone.state = Some(state);
|
stored_zone.status = Some(status);
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(runtime) => {
|
Some(runtime) => {
|
||||||
self.zlt.associate(uuid, runtime.domid).await;
|
self.zlt.associate(uuid, runtime.domid).await;
|
||||||
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
|
||||||
if let Some(code) = runtime.state.exit_code {
|
if let Some(code) = runtime.state.exit_code {
|
||||||
state.status = ZoneStatus::Exited.into();
|
status.state = ZoneState::Exited.into();
|
||||||
state.exit_info = Some(ZoneExitInfo { code });
|
status.exit_status = Some(ZoneExitStatus { code });
|
||||||
} else {
|
} else {
|
||||||
state.status = ZoneStatus::Started.into();
|
status.state = ZoneState::Created.into();
|
||||||
}
|
}
|
||||||
|
|
||||||
for device in &stored_zone
|
for device in &stored_zone
|
||||||
@ -193,8 +193,11 @@ impl ZoneReconciler {
|
|||||||
device_claims.insert(device.name.clone(), uuid);
|
device_claims.insert(device.name.clone(), uuid);
|
||||||
}
|
}
|
||||||
|
|
||||||
state.network = Some(zoneinfo_to_networkstate(runtime));
|
if let Some(reservation) = self.ip_assignment.retrieve(uuid).await? {
|
||||||
stored_zone.state = Some(state);
|
status.network_status =
|
||||||
|
Some(network_reservation_to_network_status(&reservation));
|
||||||
|
}
|
||||||
|
stored_zone.status = Some(status);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -228,20 +231,20 @@ impl ZoneReconciler {
|
|||||||
zone: Some(zone.clone()),
|
zone: Some(zone.clone()),
|
||||||
}))?;
|
}))?;
|
||||||
|
|
||||||
let start_status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
let start_state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
|
||||||
let result = match start_status {
|
let result = match start_state {
|
||||||
ZoneStatus::Starting => self.start(uuid, &mut zone).await,
|
ZoneState::Creating => self.create(uuid, &mut zone).await,
|
||||||
ZoneStatus::Exited => self.exited(&mut zone).await,
|
ZoneState::Exited => self.exited(&mut zone).await,
|
||||||
ZoneStatus::Destroying => self.destroy(uuid, &mut zone).await,
|
ZoneState::Destroying => self.destroy(uuid, &mut zone).await,
|
||||||
_ => Ok(ZoneReconcilerResult::Unchanged),
|
_ => Ok(ZoneReconcilerResult::Unchanged),
|
||||||
};
|
};
|
||||||
|
|
||||||
let result = match result {
|
let result = match result {
|
||||||
Ok(result) => result,
|
Ok(result) => result,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
|
zone.status = Some(zone.status.as_mut().cloned().unwrap_or_default());
|
||||||
zone.state.as_mut().unwrap().status = ZoneStatus::Failed.into();
|
zone.status.as_mut().unwrap().state = ZoneState::Failed.into();
|
||||||
zone.state.as_mut().unwrap().error_info = Some(ZoneErrorInfo {
|
zone.status.as_mut().unwrap().error_status = Some(ZoneErrorStatus {
|
||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
});
|
});
|
||||||
warn!("failed to start zone {}: {}", zone.id, error);
|
warn!("failed to start zone {}: {}", zone.id, error);
|
||||||
@ -251,8 +254,8 @@ impl ZoneReconciler {
|
|||||||
|
|
||||||
info!("reconciled zone {}", uuid);
|
info!("reconciled zone {}", uuid);
|
||||||
|
|
||||||
let status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
let state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
|
||||||
let destroyed = status == ZoneStatus::Destroyed;
|
let destroyed = state == ZoneState::Destroyed;
|
||||||
|
|
||||||
let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
|
let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
|
||||||
let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
|
let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
|
||||||
@ -261,7 +264,7 @@ impl ZoneReconciler {
|
|||||||
|
|
||||||
if destroyed {
|
if destroyed {
|
||||||
self.zones.remove(uuid).await?;
|
self.zones.remove(uuid).await?;
|
||||||
let mut map = self.tasks.lock().await;
|
let mut map = self.tasks.write().await;
|
||||||
map.remove(&uuid);
|
map.remove(&uuid);
|
||||||
} else {
|
} else {
|
||||||
self.zones.update(uuid, zone.clone()).await?;
|
self.zones.update(uuid, zone.clone()).await?;
|
||||||
@ -276,22 +279,24 @@ impl ZoneReconciler {
|
|||||||
Ok(rerun)
|
Ok(rerun)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
let starter = ZoneStarter {
|
let starter = ZoneCreator {
|
||||||
devices: &self.devices,
|
devices: &self.devices,
|
||||||
kernel_path: &self.kernel_path,
|
kernel_path: &self.kernel_path,
|
||||||
initrd_path: &self.initrd_path,
|
initrd_path: &self.initrd_path,
|
||||||
addons_path: &self.addons_path,
|
addons_path: &self.addons_path,
|
||||||
packer: &self.packer,
|
packer: &self.packer,
|
||||||
glt: &self.zlt,
|
network_assignment: &self.ip_assignment,
|
||||||
|
zlt: &self.zlt,
|
||||||
runtime: &self.runtime,
|
runtime: &self.runtime,
|
||||||
|
config: &self.config,
|
||||||
};
|
};
|
||||||
starter.start(uuid, zone).await
|
starter.create(uuid, zone).await
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
if let Some(ref mut state) = zone.state {
|
if let Some(ref mut status) = zone.status {
|
||||||
state.set_status(ZoneStatus::Destroying);
|
status.set_state(ZoneState::Destroying);
|
||||||
Ok(ZoneReconcilerResult::Changed { rerun: true })
|
Ok(ZoneReconcilerResult::Changed { rerun: true })
|
||||||
} else {
|
} else {
|
||||||
Ok(ZoneReconcilerResult::Unchanged)
|
Ok(ZoneReconcilerResult::Unchanged)
|
||||||
@ -303,18 +308,20 @@ impl ZoneReconciler {
|
|||||||
trace!("failed to destroy runtime zone {}: {}", uuid, error);
|
trace!("failed to destroy runtime zone {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
|
|
||||||
let domid = zone.state.as_ref().map(|x| x.domid);
|
let domid = zone.status.as_ref().map(|x| x.domid);
|
||||||
|
|
||||||
if let Some(domid) = domid {
|
if let Some(domid) = domid {
|
||||||
self.zlt.remove(uuid, domid).await;
|
self.zlt.remove(uuid, domid).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("destroyed zone {}", uuid);
|
info!("destroyed zone {}", uuid);
|
||||||
zone.state = Some(ZoneState {
|
self.ip_assignment.recall(uuid).await?;
|
||||||
status: ZoneStatus::Destroyed.into(),
|
zone.status = Some(ZoneStatus {
|
||||||
network: None,
|
state: ZoneState::Destroyed.into(),
|
||||||
exit_info: None,
|
network_status: None,
|
||||||
error_info: None,
|
exit_status: None,
|
||||||
|
error_status: None,
|
||||||
|
resource_status: None,
|
||||||
host: self.zlt.host_uuid().to_string(),
|
host: self.zlt.host_uuid().to_string(),
|
||||||
domid: domid.unwrap_or(u32::MAX),
|
domid: domid.unwrap_or(u32::MAX),
|
||||||
});
|
});
|
||||||
@ -323,7 +330,7 @@ impl ZoneReconciler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
|
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
|
||||||
let mut map = self.tasks.lock().await;
|
let mut map = self.tasks.write().await;
|
||||||
match map.entry(uuid) {
|
match map.entry(uuid) {
|
||||||
Entry::Occupied(_) => {}
|
Entry::Occupied(_) => {}
|
||||||
Entry::Vacant(entry) => {
|
Entry::Vacant(entry) => {
|
||||||
@ -336,7 +343,7 @@ impl ZoneReconciler {
|
|||||||
async fn launch_task(&self, uuid: Uuid) -> Result<ZoneReconcilerEntry> {
|
async fn launch_task(&self, uuid: Uuid) -> Result<ZoneReconcilerEntry> {
|
||||||
let this = self.clone();
|
let this = self.clone();
|
||||||
let (sender, mut receiver) = channel(10);
|
let (sender, mut receiver) = channel(10);
|
||||||
let task = tokio::task::spawn(async move {
|
tokio::task::spawn(async move {
|
||||||
'notify_loop: loop {
|
'notify_loop: loop {
|
||||||
if receiver.recv().await.is_none() {
|
if receiver.recv().await.is_none() {
|
||||||
break 'notify_loop;
|
break 'notify_loop;
|
||||||
@ -358,17 +365,17 @@ impl ZoneReconciler {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
Ok(ZoneReconcilerEntry { task, sender })
|
Ok(ZoneReconcilerEntry { sender })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn zoneinfo_to_networkstate(info: &ZoneInfo) -> ZoneNetworkState {
|
pub fn network_reservation_to_network_status(ip: &NetworkReservation) -> ZoneNetworkStatus {
|
||||||
ZoneNetworkState {
|
ZoneNetworkStatus {
|
||||||
zone_ipv4: info.zone_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
zone_ipv4: format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
|
||||||
zone_ipv6: info.zone_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
zone_ipv6: format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
|
||||||
zone_mac: info.zone_mac.as_ref().cloned().unwrap_or_default(),
|
zone_mac: ip.mac.to_string().to_lowercase().replace('-', ":"),
|
||||||
gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
gateway_ipv4: format!("{}/{}", ip.gateway_ipv4, ip.ipv4_prefix),
|
||||||
gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
gateway_ipv6: format!("{}/{}", ip.gateway_ipv6, ip.ipv6_prefix),
|
||||||
gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),
|
gateway_mac: ip.gateway_mac.to_string().to_lowercase().replace('-', ":"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -45,16 +45,25 @@ message ExecStreamRequestStart {
|
|||||||
repeated ExecEnvVar environment = 1;
|
repeated ExecEnvVar environment = 1;
|
||||||
repeated string command = 2;
|
repeated string command = 2;
|
||||||
string working_directory = 3;
|
string working_directory = 3;
|
||||||
|
bool tty = 4;
|
||||||
|
ExecStreamRequestTerminalSize terminal_size = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ExecStreamRequestStdin {
|
message ExecStreamRequestStdin {
|
||||||
bytes data = 1;
|
bytes data = 1;
|
||||||
|
bool closed = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ExecStreamRequestTerminalSize {
|
||||||
|
uint32 rows = 1;
|
||||||
|
uint32 columns = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ExecStreamRequestUpdate {
|
message ExecStreamRequestUpdate {
|
||||||
oneof update {
|
oneof update {
|
||||||
ExecStreamRequestStart start = 1;
|
ExecStreamRequestStart start = 1;
|
||||||
ExecStreamRequestStdin stdin = 2;
|
ExecStreamRequestStdin stdin = 2;
|
||||||
|
ExecStreamRequestTerminalSize terminal_resize = 3;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -11,7 +11,7 @@ import "google/protobuf/struct.proto";
|
|||||||
message Zone {
|
message Zone {
|
||||||
string id = 1;
|
string id = 1;
|
||||||
ZoneSpec spec = 2;
|
ZoneSpec spec = 2;
|
||||||
ZoneState state = 3;
|
ZoneStatus status = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneSpec {
|
message ZoneSpec {
|
||||||
@ -21,11 +21,18 @@ message ZoneSpec {
|
|||||||
ZoneImageSpec kernel = 3;
|
ZoneImageSpec kernel = 3;
|
||||||
// If not specified, defaults to the daemon default initrd.
|
// If not specified, defaults to the daemon default initrd.
|
||||||
ZoneImageSpec initrd = 4;
|
ZoneImageSpec initrd = 4;
|
||||||
uint32 vcpus = 5;
|
ZoneResourceSpec initial_resources = 5;
|
||||||
uint64 mem = 6;
|
ZoneTaskSpec task = 6;
|
||||||
ZoneTaskSpec task = 7;
|
repeated ZoneSpecAnnotation annotations = 7;
|
||||||
repeated ZoneSpecAnnotation annotations = 8;
|
repeated ZoneSpecDevice devices = 8;
|
||||||
repeated ZoneSpecDevice devices = 9;
|
ZoneKernelOptionsSpec kernel_options = 9;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ZoneResourceSpec {
|
||||||
|
uint64 max_memory = 1;
|
||||||
|
uint64 target_memory = 2;
|
||||||
|
uint32 max_cpus = 3;
|
||||||
|
uint32 target_cpus = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneImageSpec {
|
message ZoneImageSpec {
|
||||||
@ -34,6 +41,11 @@ message ZoneImageSpec {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message ZoneKernelOptionsSpec {
|
||||||
|
bool verbose = 1;
|
||||||
|
string cmdline_append = 2;
|
||||||
|
}
|
||||||
|
|
||||||
enum OciImageFormat {
|
enum OciImageFormat {
|
||||||
OCI_IMAGE_FORMAT_UNKNOWN = 0;
|
OCI_IMAGE_FORMAT_UNKNOWN = 0;
|
||||||
OCI_IMAGE_FORMAT_SQUASHFS = 1;
|
OCI_IMAGE_FORMAT_SQUASHFS = 1;
|
||||||
@ -51,6 +63,7 @@ message ZoneTaskSpec {
|
|||||||
repeated ZoneTaskSpecEnvVar environment = 1;
|
repeated ZoneTaskSpecEnvVar environment = 1;
|
||||||
repeated string command = 2;
|
repeated string command = 2;
|
||||||
string working_directory = 3;
|
string working_directory = 3;
|
||||||
|
bool tty = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneTaskSpecEnvVar {
|
message ZoneTaskSpecEnvVar {
|
||||||
@ -67,26 +80,27 @@ message ZoneSpecDevice {
|
|||||||
string name = 1;
|
string name = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneState {
|
message ZoneStatus {
|
||||||
ZoneStatus status = 1;
|
ZoneState state = 1;
|
||||||
ZoneNetworkState network = 2;
|
ZoneNetworkStatus network_status = 2;
|
||||||
ZoneExitInfo exit_info = 3;
|
ZoneExitStatus exit_status = 3;
|
||||||
ZoneErrorInfo error_info = 4;
|
ZoneErrorStatus error_status = 4;
|
||||||
string host = 5;
|
string host = 5;
|
||||||
uint32 domid = 6;
|
uint32 domid = 6;
|
||||||
|
ZoneResourceStatus resource_status = 7;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum ZoneStatus {
|
enum ZoneState {
|
||||||
ZONE_STATUS_UNKNOWN = 0;
|
ZONE_STATE_UNKNOWN = 0;
|
||||||
ZONE_STATUS_STARTING = 1;
|
ZONE_STATE_CREATING = 1;
|
||||||
ZONE_STATUS_STARTED = 2;
|
ZONE_STATE_CREATED = 2;
|
||||||
ZONE_STATUS_EXITED = 3;
|
ZONE_STATE_EXITED = 3;
|
||||||
ZONE_STATUS_DESTROYING = 4;
|
ZONE_STATE_DESTROYING = 4;
|
||||||
ZONE_STATUS_DESTROYED = 5;
|
ZONE_STATE_DESTROYED = 5;
|
||||||
ZONE_STATUS_FAILED = 6;
|
ZONE_STATE_FAILED = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneNetworkState {
|
message ZoneNetworkStatus {
|
||||||
string zone_ipv4 = 1;
|
string zone_ipv4 = 1;
|
||||||
string zone_ipv6 = 2;
|
string zone_ipv6 = 2;
|
||||||
string zone_mac = 3;
|
string zone_mac = 3;
|
||||||
@ -95,14 +109,18 @@ message ZoneNetworkState {
|
|||||||
string gateway_mac = 6;
|
string gateway_mac = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneExitInfo {
|
message ZoneExitStatus {
|
||||||
int32 code = 1;
|
int32 code = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneErrorInfo {
|
message ZoneErrorStatus {
|
||||||
string message = 1;
|
string message = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message ZoneResourceStatus {
|
||||||
|
ZoneResourceSpec active_resources = 1;
|
||||||
|
}
|
||||||
|
|
||||||
message ZoneMetricNode {
|
message ZoneMetricNode {
|
||||||
string name = 1;
|
string name = 1;
|
||||||
google.protobuf.Value value = 2;
|
google.protobuf.Value value = 2;
|
||||||
@ -116,3 +134,18 @@ enum ZoneMetricFormat {
|
|||||||
ZONE_METRIC_FORMAT_INTEGER = 2;
|
ZONE_METRIC_FORMAT_INTEGER = 2;
|
||||||
ZONE_METRIC_FORMAT_DURATION_SECONDS = 3;
|
ZONE_METRIC_FORMAT_DURATION_SECONDS = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message TerminalSize {
|
||||||
|
uint32 rows = 1;
|
||||||
|
uint32 columns = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message NetworkReservation {
|
||||||
|
string uuid = 1;
|
||||||
|
string ipv4 = 2;
|
||||||
|
string ipv6 = 3;
|
||||||
|
string mac = 4;
|
||||||
|
string gateway_ipv4 = 5;
|
||||||
|
string gateway_ipv6 = 6;
|
||||||
|
string gateway_mac = 7;
|
||||||
|
}
|
||||||
|
@ -10,34 +10,46 @@ import "krata/idm/transport.proto";
|
|||||||
import "krata/v1/common.proto";
|
import "krata/v1/common.proto";
|
||||||
|
|
||||||
service ControlService {
|
service ControlService {
|
||||||
rpc IdentifyHost(IdentifyHostRequest) returns (IdentifyHostReply);
|
rpc GetHostStatus(GetHostStatusRequest) returns (GetHostStatusReply);
|
||||||
|
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
|
||||||
|
rpc GetHostCpuTopology(GetHostCpuTopologyRequest) returns (GetHostCpuTopologyReply);
|
||||||
|
rpc SetHostPowerManagementPolicy(SetHostPowerManagementPolicyRequest) returns (SetHostPowerManagementPolicyReply);
|
||||||
|
|
||||||
rpc CreateZone(CreateZoneRequest) returns (CreateZoneReply);
|
|
||||||
rpc DestroyZone(DestroyZoneRequest) returns (DestroyZoneReply);
|
|
||||||
rpc ResolveZone(ResolveZoneRequest) returns (ResolveZoneReply);
|
|
||||||
rpc ListZones(ListZonesRequest) returns (ListZonesReply);
|
|
||||||
rpc ListDevices(ListDevicesRequest) returns (ListDevicesReply);
|
rpc ListDevices(ListDevicesRequest) returns (ListDevicesReply);
|
||||||
|
|
||||||
rpc ExecZone(stream ExecZoneRequest) returns (stream ExecZoneReply);
|
rpc ListNetworkReservations(ListNetworkReservationsRequest) returns (ListNetworkReservationsReply);
|
||||||
|
|
||||||
rpc AttachZoneConsole(stream ZoneConsoleRequest) returns (stream ZoneConsoleReply);
|
|
||||||
rpc ReadZoneMetrics(ReadZoneMetricsRequest) returns (ReadZoneMetricsReply);
|
|
||||||
|
|
||||||
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
|
|
||||||
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
|
|
||||||
|
|
||||||
rpc PullImage(PullImageRequest) returns (stream PullImageReply);
|
rpc PullImage(PullImageRequest) returns (stream PullImageReply);
|
||||||
|
|
||||||
rpc GetHostCpuTopology(HostCpuTopologyRequest) returns (HostCpuTopologyReply);
|
rpc CreateZone(CreateZoneRequest) returns (CreateZoneReply);
|
||||||
rpc SetHostPowerManagementPolicy(HostPowerManagementPolicy) returns (HostPowerManagementPolicy);
|
rpc DestroyZone(DestroyZoneRequest) returns (DestroyZoneReply);
|
||||||
|
|
||||||
|
rpc ResolveZoneId(ResolveZoneIdRequest) returns (ResolveZoneIdReply);
|
||||||
|
|
||||||
|
rpc GetZone(GetZoneRequest) returns (GetZoneReply);
|
||||||
|
|
||||||
|
rpc UpdateZoneResources(UpdateZoneResourcesRequest) returns (UpdateZoneResourcesReply);
|
||||||
|
|
||||||
|
rpc ListZones(ListZonesRequest) returns (ListZonesReply);
|
||||||
|
|
||||||
|
rpc AttachZoneConsole(stream ZoneConsoleRequest) returns (stream ZoneConsoleReply);
|
||||||
|
rpc ExecInsideZone(stream ExecInsideZoneRequest) returns (stream ExecInsideZoneReply);
|
||||||
|
rpc ReadZoneMetrics(ReadZoneMetricsRequest) returns (ReadZoneMetricsReply);
|
||||||
|
|
||||||
|
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
|
||||||
|
|
||||||
|
rpc ReadHypervisorConsole(ReadHypervisorConsoleRequest) returns (ReadHypervisorConsoleReply);
|
||||||
}
|
}
|
||||||
|
|
||||||
message IdentifyHostRequest {}
|
message GetHostStatusRequest {}
|
||||||
|
|
||||||
message IdentifyHostReply {
|
message GetHostStatusReply {
|
||||||
string host_uuid = 1;
|
string host_uuid = 1;
|
||||||
uint32 host_domid = 2;
|
uint32 host_domid = 2;
|
||||||
string krata_version = 3;
|
string krata_version = 3;
|
||||||
|
string host_ipv4 = 4;
|
||||||
|
string host_ipv6 = 5;
|
||||||
|
string host_mac = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message CreateZoneRequest {
|
message CreateZoneRequest {
|
||||||
@ -45,36 +57,46 @@ message CreateZoneRequest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
message CreateZoneReply {
|
message CreateZoneReply {
|
||||||
string Zone_id = 1;
|
string zone_id = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message DestroyZoneRequest {
|
message DestroyZoneRequest {
|
||||||
string Zone_id = 1;
|
string zone_id = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message DestroyZoneReply {}
|
message DestroyZoneReply {}
|
||||||
|
|
||||||
message ResolveZoneRequest {
|
message ResolveZoneIdRequest {
|
||||||
string name = 1;
|
string name = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ResolveZoneReply {
|
message ResolveZoneIdReply {
|
||||||
krata.v1.common.Zone Zone = 1;
|
string zone_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GetZoneRequest {
|
||||||
|
string zone_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GetZoneReply {
|
||||||
|
krata.v1.common.Zone zone = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ListZonesRequest {}
|
message ListZonesRequest {}
|
||||||
|
|
||||||
message ListZonesReply {
|
message ListZonesReply {
|
||||||
repeated krata.v1.common.Zone Zones = 1;
|
repeated krata.v1.common.Zone zones = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ExecZoneRequest {
|
message ExecInsideZoneRequest {
|
||||||
string Zone_id = 1;
|
string zone_id = 1;
|
||||||
krata.v1.common.ZoneTaskSpec task = 2;
|
krata.v1.common.ZoneTaskSpec task = 2;
|
||||||
bytes data = 3;
|
bytes stdin = 3;
|
||||||
|
bool stdin_closed = 4;
|
||||||
|
krata.v1.common.TerminalSize terminal_size = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ExecZoneReply {
|
message ExecInsideZoneReply {
|
||||||
bool exited = 1;
|
bool exited = 1;
|
||||||
string error = 2;
|
string error = 2;
|
||||||
int32 exit_code = 3;
|
int32 exit_code = 3;
|
||||||
@ -83,8 +105,9 @@ message ExecZoneReply {
|
|||||||
}
|
}
|
||||||
|
|
||||||
message ZoneConsoleRequest {
|
message ZoneConsoleRequest {
|
||||||
string Zone_id = 1;
|
string zone_id = 1;
|
||||||
bytes data = 2;
|
bytes data = 2;
|
||||||
|
bool replay_history = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneConsoleReply {
|
message ZoneConsoleReply {
|
||||||
@ -95,16 +118,16 @@ message WatchEventsRequest {}
|
|||||||
|
|
||||||
message WatchEventsReply {
|
message WatchEventsReply {
|
||||||
oneof event {
|
oneof event {
|
||||||
ZoneChangedEvent Zone_changed = 1;
|
ZoneChangedEvent zone_changed = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
message ZoneChangedEvent {
|
message ZoneChangedEvent {
|
||||||
krata.v1.common.Zone Zone = 1;
|
krata.v1.common.Zone zone = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ReadZoneMetricsRequest {
|
message ReadZoneMetricsRequest {
|
||||||
string Zone_id = 1;
|
string zone_id = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ReadZoneMetricsReply {
|
message ReadZoneMetricsReply {
|
||||||
@ -219,15 +242,34 @@ message HostCpuTopologyInfo {
|
|||||||
HostCpuTopologyClass class = 5;
|
HostCpuTopologyClass class = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
message HostCpuTopologyRequest {}
|
message GetHostCpuTopologyRequest {}
|
||||||
|
|
||||||
message HostCpuTopologyReply {
|
message GetHostCpuTopologyReply {
|
||||||
repeated HostCpuTopologyInfo cpus = 1;
|
repeated HostCpuTopologyInfo cpus = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message HostPowerManagementPolicyRequest {}
|
message SetHostPowerManagementPolicyRequest {
|
||||||
|
|
||||||
message HostPowerManagementPolicy {
|
|
||||||
string scheduler = 1;
|
string scheduler = 1;
|
||||||
bool smt_awareness = 2;
|
bool smt_awareness = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message SetHostPowerManagementPolicyReply {}
|
||||||
|
|
||||||
|
message UpdateZoneResourcesRequest {
|
||||||
|
string zone_id = 1;
|
||||||
|
krata.v1.common.ZoneResourceSpec resources = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UpdateZoneResourcesReply {}
|
||||||
|
|
||||||
|
message ReadHypervisorConsoleRequest {}
|
||||||
|
|
||||||
|
message ReadHypervisorConsoleReply {
|
||||||
|
string data = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListNetworkReservationsRequest {}
|
||||||
|
|
||||||
|
message ListNetworkReservationsReply {
|
||||||
|
repeated krata.v1.common.NetworkReservation reservations = 1;
|
||||||
|
}
|
||||||
|
@ -9,13 +9,13 @@ use std::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use bytes::{BufMut, BytesMut};
|
use bytes::{Buf, BufMut, BytesMut};
|
||||||
use log::{debug, error};
|
use log::{debug, error};
|
||||||
use nix::sys::termios::{cfmakeraw, tcgetattr, tcsetattr, SetArg};
|
use nix::sys::termios::{cfmakeraw, tcgetattr, tcsetattr, SetArg};
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use tokio::{
|
use tokio::{
|
||||||
fs::File,
|
fs::File,
|
||||||
io::{unix::AsyncFd, AsyncReadExt, AsyncWriteExt},
|
io::{AsyncReadExt, AsyncWriteExt},
|
||||||
select,
|
select,
|
||||||
sync::{
|
sync::{
|
||||||
broadcast,
|
broadcast,
|
||||||
@ -34,7 +34,7 @@ use super::{
|
|||||||
|
|
||||||
type OneshotRequestMap<R> = Arc<Mutex<HashMap<u64, oneshot::Sender<<R as IdmRequest>::Response>>>>;
|
type OneshotRequestMap<R> = Arc<Mutex<HashMap<u64, oneshot::Sender<<R as IdmRequest>::Response>>>>;
|
||||||
type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
|
type StreamRequestMap<R> = Arc<Mutex<HashMap<u64, Sender<<R as IdmRequest>::Response>>>>;
|
||||||
type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, mpsc::Sender<R>>>>;
|
type StreamRequestUpdateMap<R> = Arc<Mutex<HashMap<u64, Sender<R>>>>;
|
||||||
pub type IdmInternalClient = IdmClient<internal::Request, internal::Event>;
|
pub type IdmInternalClient = IdmClient<internal::Request, internal::Event>;
|
||||||
|
|
||||||
const IDM_PACKET_QUEUE_LEN: usize = 100;
|
const IDM_PACKET_QUEUE_LEN: usize = 100;
|
||||||
@ -43,12 +43,13 @@ const IDM_PACKET_MAX_SIZE: usize = 20 * 1024 * 1024;
|
|||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
pub trait IdmBackend: Send {
|
pub trait IdmBackend: Send {
|
||||||
async fn recv(&mut self) -> Result<IdmTransportPacket>;
|
async fn recv(&mut self) -> Result<Vec<IdmTransportPacket>>;
|
||||||
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()>;
|
async fn send(&mut self, packet: IdmTransportPacket) -> Result<()>;
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct IdmFileBackend {
|
pub struct IdmFileBackend {
|
||||||
read_fd: Arc<Mutex<AsyncFd<File>>>,
|
read: Arc<Mutex<File>>,
|
||||||
|
read_buffer: BytesMut,
|
||||||
write: Arc<Mutex<File>>,
|
write: Arc<Mutex<File>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -57,7 +58,8 @@ impl IdmFileBackend {
|
|||||||
IdmFileBackend::set_raw_port(&read_file)?;
|
IdmFileBackend::set_raw_port(&read_file)?;
|
||||||
IdmFileBackend::set_raw_port(&write_file)?;
|
IdmFileBackend::set_raw_port(&write_file)?;
|
||||||
Ok(IdmFileBackend {
|
Ok(IdmFileBackend {
|
||||||
read_fd: Arc::new(Mutex::new(AsyncFd::new(read_file)?)),
|
read: Arc::new(Mutex::new(read_file)),
|
||||||
|
read_buffer: BytesMut::new(),
|
||||||
write: Arc::new(Mutex::new(write_file)),
|
write: Arc::new(Mutex::new(write_file)),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -72,26 +74,58 @@ impl IdmFileBackend {
|
|||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
impl IdmBackend for IdmFileBackend {
|
impl IdmBackend for IdmFileBackend {
|
||||||
async fn recv(&mut self) -> Result<IdmTransportPacket> {
|
async fn recv(&mut self) -> Result<Vec<IdmTransportPacket>> {
|
||||||
let mut fd = self.read_fd.lock().await;
|
let mut data = vec![0; 8192];
|
||||||
let mut guard = fd.readable_mut().await?;
|
let mut first = true;
|
||||||
let b1 = guard.get_inner_mut().read_u8().await?;
|
'read_more: loop {
|
||||||
if b1 != 0xff {
|
let mut packets = Vec::new();
|
||||||
return Ok(IdmTransportPacket::default());
|
if !first {
|
||||||
}
|
if !packets.is_empty() {
|
||||||
let b2 = guard.get_inner_mut().read_u8().await?;
|
return Ok(packets);
|
||||||
if b2 != 0xff {
|
}
|
||||||
return Ok(IdmTransportPacket::default());
|
let size = self.read.lock().await.read(&mut data).await?;
|
||||||
}
|
self.read_buffer.extend_from_slice(&data[0..size]);
|
||||||
let size = guard.get_inner_mut().read_u32_le().await?;
|
}
|
||||||
if size == 0 {
|
first = false;
|
||||||
return Ok(IdmTransportPacket::default());
|
loop {
|
||||||
}
|
if self.read_buffer.len() < 6 {
|
||||||
let mut buffer = vec![0u8; size as usize];
|
continue 'read_more;
|
||||||
guard.get_inner_mut().read_exact(&mut buffer).await?;
|
}
|
||||||
match IdmTransportPacket::decode(buffer.as_slice()) {
|
|
||||||
Ok(packet) => Ok(packet),
|
let b1 = self.read_buffer[0];
|
||||||
Err(error) => Err(anyhow!("received invalid idm packet: {}", error)),
|
let b2 = self.read_buffer[1];
|
||||||
|
|
||||||
|
if b1 != 0xff || b2 != 0xff {
|
||||||
|
self.read_buffer.clear();
|
||||||
|
continue 'read_more;
|
||||||
|
}
|
||||||
|
|
||||||
|
let size = (self.read_buffer[2] as u32
|
||||||
|
| (self.read_buffer[3] as u32) << 8
|
||||||
|
| (self.read_buffer[4] as u32) << 16
|
||||||
|
| (self.read_buffer[5] as u32) << 24) as usize;
|
||||||
|
let needed = size + 6;
|
||||||
|
if self.read_buffer.len() < needed {
|
||||||
|
continue 'read_more;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut packet = self.read_buffer.split_to(needed);
|
||||||
|
packet.advance(6);
|
||||||
|
|
||||||
|
match IdmTransportPacket::decode(packet) {
|
||||||
|
Ok(packet) => {
|
||||||
|
packets.push(packet);
|
||||||
|
}
|
||||||
|
Err(error) => {
|
||||||
|
return Err(anyhow!("received invalid idm packet: {}", error));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.read_buffer.is_empty() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Ok(packets);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -403,8 +437,9 @@ impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
|
|||||||
loop {
|
loop {
|
||||||
select! {
|
select! {
|
||||||
x = backend.recv() => match x {
|
x = backend.recv() => match x {
|
||||||
Ok(packet) => {
|
Ok(packets) => {
|
||||||
if packet.channel != channel {
|
for packet in packets {
|
||||||
|
if packet.channel != channel {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -460,6 +495,7 @@ impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
|
|||||||
IdmTransportPacketForm::StreamRequestClosed => {
|
IdmTransportPacketForm::StreamRequestClosed => {
|
||||||
let mut update_streams = request_update_streams.lock().await;
|
let mut update_streams = request_update_streams.lock().await;
|
||||||
update_streams.remove(&packet.id);
|
update_streams.remove(&packet.id);
|
||||||
|
println!("stream request closed: {}", packet.id);
|
||||||
}
|
}
|
||||||
|
|
||||||
IdmTransportPacketForm::StreamResponseUpdate => {
|
IdmTransportPacketForm::StreamResponseUpdate => {
|
||||||
@ -478,6 +514,7 @@ impl<R: IdmRequest, E: IdmSerializable> IdmClient<R, E> {
|
|||||||
|
|
||||||
_ => {},
|
_ => {},
|
||||||
}
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
|
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
|
@ -16,7 +16,7 @@ clap = { workspace = true }
|
|||||||
env_logger = { workspace = true }
|
env_logger = { workspace = true }
|
||||||
etherparse = { workspace = true }
|
etherparse = { workspace = true }
|
||||||
futures = { workspace = true }
|
futures = { workspace = true }
|
||||||
krata = { path = "../krata", version = "^0.0.14" }
|
krata = { path = "../krata", version = "^0.0.20" }
|
||||||
krata-advmac = { workspace = true }
|
krata-advmac = { workspace = true }
|
||||||
libc = { workspace = true }
|
libc = { workspace = true }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
|
@ -76,44 +76,44 @@ impl AutoNetworkWatcher {
|
|||||||
|
|
||||||
let mut networks: Vec<NetworkMetadata> = Vec::new();
|
let mut networks: Vec<NetworkMetadata> = Vec::new();
|
||||||
for (uuid, zone) in &all_zones {
|
for (uuid, zone) in &all_zones {
|
||||||
let Some(ref state) = zone.state else {
|
let Some(ref status) = zone.status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if state.domid == u32::MAX {
|
if status.domid == u32::MAX {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(ref network) = state.network else {
|
let Some(ref network_status) = status.network_status else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(zone_ipv4_cidr) = Ipv4Cidr::from_str(&network.zone_ipv4) else {
|
let Ok(zone_ipv4_cidr) = Ipv4Cidr::from_str(&network_status.zone_ipv4) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(zone_ipv6_cidr) = Ipv6Cidr::from_str(&network.zone_ipv6) else {
|
let Ok(zone_ipv6_cidr) = Ipv6Cidr::from_str(&network_status.zone_ipv6) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(zone_mac) = EthernetAddress::from_str(&network.zone_mac) else {
|
let Ok(zone_mac) = EthernetAddress::from_str(&network_status.zone_mac) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(gateway_ipv4_cidr) = Ipv4Cidr::from_str(&network.gateway_ipv4) else {
|
let Ok(gateway_ipv4_cidr) = Ipv4Cidr::from_str(&network_status.gateway_ipv4) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(gateway_ipv6_cidr) = Ipv6Cidr::from_str(&network.gateway_ipv6) else {
|
let Ok(gateway_ipv6_cidr) = Ipv6Cidr::from_str(&network_status.gateway_ipv6) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(gateway_mac) = EthernetAddress::from_str(&network.gateway_mac) else {
|
let Ok(gateway_mac) = EthernetAddress::from_str(&network_status.gateway_mac) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
networks.push(NetworkMetadata {
|
networks.push(NetworkMetadata {
|
||||||
domid: state.domid,
|
domid: status.domid,
|
||||||
uuid: *uuid,
|
uuid: *uuid,
|
||||||
zone: NetworkSide {
|
zone: NetworkSide {
|
||||||
ipv4: zone_ipv4_cidr,
|
ipv4: zone_ipv4_cidr,
|
||||||
@ -187,7 +187,7 @@ impl AutoNetworkWatcher {
|
|||||||
_ = sleep(Duration::from_secs(10)) => {
|
_ = sleep(Duration::from_secs(10)) => {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -127,7 +127,8 @@ impl NetworkBackend {
|
|||||||
let (tx_sender, tx_receiver) = channel::<BytesMut>(TX_CHANNEL_BUFFER_LEN);
|
let (tx_sender, tx_receiver) = channel::<BytesMut>(TX_CHANNEL_BUFFER_LEN);
|
||||||
let mut udev = ChannelDevice::new(mtu, Medium::Ethernet, tx_sender.clone());
|
let mut udev = ChannelDevice::new(mtu, Medium::Ethernet, tx_sender.clone());
|
||||||
let mac = self.metadata.gateway.mac;
|
let mac = self.metadata.gateway.mac;
|
||||||
let nat = Nat::new(mtu, proxy, mac, addresses.clone(), tx_sender.clone())?;
|
let local_cidrs = addresses.clone();
|
||||||
|
let nat = Nat::new(mtu, proxy, mac, local_cidrs, tx_sender.clone())?;
|
||||||
let hardware_addr = HardwareAddress::Ethernet(mac);
|
let hardware_addr = HardwareAddress::Ethernet(mac);
|
||||||
let config = Config::new(hardware_addr);
|
let config = Config::new(hardware_addr);
|
||||||
let mut iface = Interface::new(config, &mut udev, Instant::now());
|
let mut iface = Interface::new(config, &mut udev, Instant::now());
|
||||||
|
@ -1,21 +1,15 @@
|
|||||||
use std::{
|
use std::{io::ErrorKind, net::IpAddr};
|
||||||
io::ErrorKind,
|
|
||||||
net::{IpAddr, Ipv4Addr},
|
|
||||||
};
|
|
||||||
|
|
||||||
use advmac::MacAddr6;
|
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use futures::TryStreamExt;
|
use futures::TryStreamExt;
|
||||||
use log::error;
|
use log::error;
|
||||||
use smoltcp::wire::EthernetAddress;
|
use smoltcp::wire::{EthernetAddress, Ipv4Cidr, Ipv6Cidr};
|
||||||
use tokio::{select, task::JoinHandle};
|
use tokio::{select, task::JoinHandle};
|
||||||
use tokio_tun::Tun;
|
use tokio_tun::Tun;
|
||||||
|
|
||||||
use crate::vbridge::{BridgeJoinHandle, VirtualBridge};
|
use crate::vbridge::{BridgeJoinHandle, VirtualBridge};
|
||||||
|
|
||||||
const HOST_IPV4_ADDR: Ipv4Addr = Ipv4Addr::new(10, 75, 0, 1);
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
enum HostBridgeProcessSelect {
|
enum HostBridgeProcessSelect {
|
||||||
Send(Option<BytesMut>),
|
Send(Option<BytesMut>),
|
||||||
@ -27,7 +21,14 @@ pub struct HostBridge {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl HostBridge {
|
impl HostBridge {
|
||||||
pub async fn new(mtu: usize, interface: String, bridge: &VirtualBridge) -> Result<HostBridge> {
|
pub async fn new(
|
||||||
|
mtu: usize,
|
||||||
|
interface: String,
|
||||||
|
bridge: &VirtualBridge,
|
||||||
|
ipv4: Ipv4Cidr,
|
||||||
|
ipv6: Ipv6Cidr,
|
||||||
|
mac: EthernetAddress,
|
||||||
|
) -> Result<HostBridge> {
|
||||||
let tun = Tun::builder()
|
let tun = Tun::builder()
|
||||||
.name(&interface)
|
.name(&interface)
|
||||||
.tap(true)
|
.tap(true)
|
||||||
@ -38,10 +39,6 @@ impl HostBridge {
|
|||||||
let (connection, handle, _) = rtnetlink::new_connection()?;
|
let (connection, handle, _) = rtnetlink::new_connection()?;
|
||||||
tokio::spawn(connection);
|
tokio::spawn(connection);
|
||||||
|
|
||||||
let mut mac = MacAddr6::random();
|
|
||||||
mac.set_local(true);
|
|
||||||
mac.set_multicast(false);
|
|
||||||
|
|
||||||
let mut links = handle.link().get().match_name(interface.clone()).execute();
|
let mut links = handle.link().get().match_name(interface.clone()).execute();
|
||||||
let link = links.try_next().await?;
|
let link = links.try_next().await?;
|
||||||
if link.is_none() {
|
if link.is_none() {
|
||||||
@ -54,25 +51,32 @@ impl HostBridge {
|
|||||||
|
|
||||||
handle
|
handle
|
||||||
.address()
|
.address()
|
||||||
.add(link.header.index, IpAddr::V4(HOST_IPV4_ADDR), 16)
|
.add(
|
||||||
|
link.header.index,
|
||||||
|
IpAddr::V4(ipv4.address().into()),
|
||||||
|
ipv4.prefix_len(),
|
||||||
|
)
|
||||||
.execute()
|
.execute()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
handle
|
handle
|
||||||
.address()
|
.address()
|
||||||
.add(link.header.index, IpAddr::V6(mac.to_link_local_ipv6()), 10)
|
.add(
|
||||||
|
link.header.index,
|
||||||
|
IpAddr::V6(ipv6.address().into()),
|
||||||
|
ipv6.prefix_len(),
|
||||||
|
)
|
||||||
.execute()
|
.execute()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
handle
|
handle
|
||||||
.link()
|
.link()
|
||||||
.set(link.header.index)
|
.set(link.header.index)
|
||||||
.address(mac.to_array().to_vec())
|
.address(mac.0.to_vec())
|
||||||
.up()
|
.up()
|
||||||
.execute()
|
.execute()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let mac = EthernetAddress(mac.to_array());
|
|
||||||
let bridge_handle = bridge.join(mac).await?;
|
let bridge_handle = bridge.join(mac).await?;
|
||||||
|
|
||||||
let task = tokio::task::spawn(async move {
|
let task = tokio::task::spawn(async move {
|
||||||
|
@ -1,17 +1,21 @@
|
|||||||
use std::{collections::HashMap, time::Duration};
|
use std::{collections::HashMap, str::FromStr, time::Duration};
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::{anyhow, Result};
|
||||||
use autonet::{AutoNetworkChangeset, AutoNetworkWatcher, NetworkMetadata};
|
use autonet::{AutoNetworkChangeset, AutoNetworkWatcher, NetworkMetadata};
|
||||||
use futures::{future::join_all, TryFutureExt};
|
use futures::{future::join_all, TryFutureExt};
|
||||||
use hbridge::HostBridge;
|
use hbridge::HostBridge;
|
||||||
use krata::{
|
use krata::{
|
||||||
client::ControlClientProvider,
|
client::ControlClientProvider,
|
||||||
dial::ControlDialAddress,
|
dial::ControlDialAddress,
|
||||||
v1::{common::Zone, control::control_service_client::ControlServiceClient},
|
v1::{
|
||||||
|
common::Zone,
|
||||||
|
control::{control_service_client::ControlServiceClient, GetHostStatusRequest},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
use log::warn;
|
use log::warn;
|
||||||
|
use smoltcp::wire::{EthernetAddress, Ipv4Cidr, Ipv6Cidr};
|
||||||
use tokio::{task::JoinHandle, time::sleep};
|
use tokio::{task::JoinHandle, time::sleep};
|
||||||
use tonic::transport::Channel;
|
use tonic::{transport::Channel, Request};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
use vbridge::VirtualBridge;
|
use vbridge::VirtualBridge;
|
||||||
|
|
||||||
@ -41,10 +45,27 @@ pub struct NetworkService {
|
|||||||
|
|
||||||
impl NetworkService {
|
impl NetworkService {
|
||||||
pub async fn new(control_address: ControlDialAddress) -> Result<NetworkService> {
|
pub async fn new(control_address: ControlDialAddress) -> Result<NetworkService> {
|
||||||
let control = ControlClientProvider::dial(control_address).await?;
|
let mut control = ControlClientProvider::dial(control_address).await?;
|
||||||
|
let host_status = control
|
||||||
|
.get_host_status(Request::new(GetHostStatusRequest {}))
|
||||||
|
.await?
|
||||||
|
.into_inner();
|
||||||
|
let host_ipv4 = Ipv4Cidr::from_str(&host_status.host_ipv4)
|
||||||
|
.map_err(|_| anyhow!("failed to parse host ipv4 cidr"))?;
|
||||||
|
let host_ipv6 = Ipv6Cidr::from_str(&host_status.host_ipv6)
|
||||||
|
.map_err(|_| anyhow!("failed to parse host ipv6 cidr"))?;
|
||||||
|
let host_mac = EthernetAddress::from_str(&host_status.host_mac)
|
||||||
|
.map_err(|_| anyhow!("failed to parse host mac address"))?;
|
||||||
let bridge = VirtualBridge::new()?;
|
let bridge = VirtualBridge::new()?;
|
||||||
let hbridge =
|
let hbridge = HostBridge::new(
|
||||||
HostBridge::new(HOST_BRIDGE_MTU + EXTRA_MTU, "krata0".to_string(), &bridge).await?;
|
HOST_BRIDGE_MTU + EXTRA_MTU,
|
||||||
|
"krata0".to_string(),
|
||||||
|
&bridge,
|
||||||
|
host_ipv4,
|
||||||
|
host_ipv6,
|
||||||
|
host_mac,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
Ok(NetworkService {
|
Ok(NetworkService {
|
||||||
control,
|
control,
|
||||||
zones: HashMap::new(),
|
zones: HashMap::new(),
|
||||||
|
@ -25,7 +25,7 @@ async fn main() -> Result<()> {
|
|||||||
let (context, mut receiver) = OciProgressContext::create();
|
let (context, mut receiver) = OciProgressContext::create();
|
||||||
tokio::task::spawn(async move {
|
tokio::task::spawn(async move {
|
||||||
loop {
|
loop {
|
||||||
if (receiver.changed().await).is_err() {
|
if receiver.changed().await.is_err() {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
let progress = receiver.borrow_and_update();
|
let progress = receiver.borrow_and_update();
|
||||||
|
@ -97,13 +97,13 @@ impl OciPackerBackend for OciPackerMkSquashfs {
|
|||||||
status = &mut wait => {
|
status = &mut wait => {
|
||||||
break status;
|
break status;
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
} else {
|
} else {
|
||||||
select! {
|
select! {
|
||||||
status = &mut wait => {
|
status = &mut wait => {
|
||||||
break status;
|
break status;
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
if let Some(writer) = writer {
|
if let Some(writer) = writer {
|
||||||
@ -172,13 +172,13 @@ impl OciPackerBackend for OciPackerMkfsErofs {
|
|||||||
status = &mut wait => {
|
status = &mut wait => {
|
||||||
break status;
|
break status;
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
} else {
|
} else {
|
||||||
select! {
|
select! {
|
||||||
status = &mut wait => {
|
status = &mut wait => {
|
||||||
break status;
|
break status;
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
if let Some(writer) = writer {
|
if let Some(writer) = writer {
|
||||||
|
@ -228,7 +228,7 @@ impl OciBoundProgress {
|
|||||||
context.update(&progress);
|
context.update(&progress);
|
||||||
let mut receiver = self.context.subscribe();
|
let mut receiver = self.context.subscribe();
|
||||||
tokio::task::spawn(async move {
|
tokio::task::spawn(async move {
|
||||||
while (receiver.changed().await).is_ok() {
|
while receiver.changed().await.is_ok() {
|
||||||
context
|
context
|
||||||
.sender
|
.sender
|
||||||
.send_replace(receiver.borrow_and_update().clone());
|
.send_replace(receiver.borrow_and_update().clone());
|
||||||
|
@ -12,20 +12,20 @@ resolver = "2"
|
|||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
backhand = { workspace = true }
|
backhand = { workspace = true }
|
||||||
ipnetwork = { workspace = true }
|
ipnetwork = { workspace = true }
|
||||||
krata = { path = "../krata", version = "^0.0.14" }
|
krata = { path = "../krata", version = "^0.0.20" }
|
||||||
krata-advmac = { workspace = true }
|
krata-advmac = { workspace = true }
|
||||||
krata-oci = { path = "../oci", version = "^0.0.14" }
|
krata-oci = { path = "../oci", version = "^0.0.20" }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
uuid = { workspace = true }
|
uuid = { workspace = true }
|
||||||
krata-loopdev = { path = "../loopdev", version = "^0.0.14" }
|
krata-loopdev = { path = "../loopdev", version = "^0.0.20" }
|
||||||
krata-xencall = { path = "../xen/xencall", version = "^0.0.14" }
|
krata-xencall = { path = "../xen/xencall", version = "^0.0.20" }
|
||||||
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.14" }
|
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.20" }
|
||||||
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.14" }
|
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.20" }
|
||||||
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.14" }
|
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.20" }
|
||||||
krata-xenplatform = { path = "../xen/xenplatform", version = "^0.0.14" }
|
krata-xenplatform = { path = "../xen/xenplatform", version = "^0.0.20" }
|
||||||
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.14" }
|
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.20" }
|
||||||
walkdir = { workspace = true }
|
walkdir = { workspace = true }
|
||||||
indexmap = { workspace = true }
|
indexmap = { workspace = true }
|
||||||
|
|
||||||
|
@ -8,14 +8,11 @@ use anyhow::{anyhow, Result};
|
|||||||
use log::{debug, error};
|
use log::{debug, error};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
select,
|
select,
|
||||||
sync::{
|
sync::mpsc::{channel, Receiver, Sender},
|
||||||
broadcast,
|
|
||||||
mpsc::{channel, Receiver, Sender},
|
|
||||||
},
|
|
||||||
task::JoinHandle,
|
task::JoinHandle,
|
||||||
time::sleep,
|
time::sleep,
|
||||||
};
|
};
|
||||||
use xenevtchn::EventChannel;
|
use xenevtchn::EventChannelService;
|
||||||
use xengnt::{sys::GrantRef, GrantTab, MappedMemory};
|
use xengnt::{sys::GrantRef, GrantTab, MappedMemory};
|
||||||
use xenstore::{XsdClient, XsdInterface};
|
use xenstore::{XsdClient, XsdInterface};
|
||||||
|
|
||||||
@ -43,7 +40,7 @@ pub struct ChannelService {
|
|||||||
typ: String,
|
typ: String,
|
||||||
use_reserved_ref: Option<u64>,
|
use_reserved_ref: Option<u64>,
|
||||||
backends: HashMap<u32, ChannelBackend>,
|
backends: HashMap<u32, ChannelBackend>,
|
||||||
evtchn: EventChannel,
|
evtchn: EventChannelService,
|
||||||
store: XsdClient,
|
store: XsdClient,
|
||||||
gnttab: GrantTab,
|
gnttab: GrantTab,
|
||||||
input_receiver: Receiver<(u32, Vec<u8>)>,
|
input_receiver: Receiver<(u32, Vec<u8>)>,
|
||||||
@ -62,14 +59,22 @@ impl ChannelService {
|
|||||||
)> {
|
)> {
|
||||||
let (input_sender, input_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
|
let (input_sender, input_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
|
||||||
let (output_sender, output_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
|
let (output_sender, output_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
|
||||||
|
|
||||||
|
debug!("opening xenevtchn");
|
||||||
|
let evtchn = EventChannelService::open().await?;
|
||||||
|
debug!("opening xenstore");
|
||||||
|
let store = XsdClient::open().await?;
|
||||||
|
debug!("opening xengnt");
|
||||||
|
let gnttab = GrantTab::open()?;
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
ChannelService {
|
ChannelService {
|
||||||
typ,
|
typ,
|
||||||
use_reserved_ref,
|
use_reserved_ref,
|
||||||
backends: HashMap::new(),
|
backends: HashMap::new(),
|
||||||
evtchn: EventChannel::open().await?,
|
evtchn,
|
||||||
store: XsdClient::open().await?,
|
store,
|
||||||
gnttab: GrantTab::open()?,
|
gnttab,
|
||||||
input_sender: input_sender.clone(),
|
input_sender: input_sender.clone(),
|
||||||
input_receiver,
|
input_receiver,
|
||||||
output_sender,
|
output_sender,
|
||||||
@ -226,7 +231,7 @@ impl ChannelBackend {
|
|||||||
domid: u32,
|
domid: u32,
|
||||||
id: u32,
|
id: u32,
|
||||||
store: XsdClient,
|
store: XsdClient,
|
||||||
evtchn: EventChannel,
|
evtchn: EventChannelService,
|
||||||
gnttab: GrantTab,
|
gnttab: GrantTab,
|
||||||
output_sender: Sender<(u32, Option<Vec<u8>>)>,
|
output_sender: Sender<(u32, Option<Vec<u8>>)>,
|
||||||
use_reserved_ref: Option<u64>,
|
use_reserved_ref: Option<u64>,
|
||||||
@ -265,7 +270,7 @@ pub struct KrataChannelBackendProcessor {
|
|||||||
id: u32,
|
id: u32,
|
||||||
domid: u32,
|
domid: u32,
|
||||||
store: XsdClient,
|
store: XsdClient,
|
||||||
evtchn: EventChannel,
|
evtchn: EventChannelService,
|
||||||
gnttab: GrantTab,
|
gnttab: GrantTab,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -484,28 +489,21 @@ impl KrataChannelBackendProcessor {
|
|||||||
},
|
},
|
||||||
|
|
||||||
x = channel.receiver.recv() => match x {
|
x = channel.receiver.recv() => match x {
|
||||||
Ok(_) => {
|
Some(_) => {
|
||||||
unsafe {
|
unsafe {
|
||||||
let buffer = self.read_output_buffer(channel.local_port, &memory).await?;
|
let buffer = self.read_output_buffer(channel.local_port, &memory).await?;
|
||||||
if !buffer.is_empty() {
|
if !buffer.is_empty() {
|
||||||
sender.send((self.domid, Some(buffer))).await?;
|
sender.send((self.domid, Some(buffer))).await?;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
channel.unmask_sender.send(channel.local_port).await?;
|
channel.unmask().await?;
|
||||||
},
|
},
|
||||||
|
|
||||||
Err(error) => {
|
None => {
|
||||||
match error {
|
break;
|
||||||
broadcast::error::RecvError::Closed => {
|
|
||||||
break;
|
|
||||||
},
|
|
||||||
error => {
|
|
||||||
return Err(anyhow!("failed to receive event notification: {}", error));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -7,7 +7,7 @@ use std::{
|
|||||||
|
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use ipnetwork::{Ipv4Network, Ipv6Network};
|
use ipnetwork::{Ipv4Network, Ipv6Network};
|
||||||
use log::error;
|
use log::{debug, error};
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
use xenstore::{XsdClient, XsdInterface};
|
use xenstore::{XsdClient, XsdInterface};
|
||||||
@ -72,7 +72,9 @@ impl IpVendor {
|
|||||||
ipv4_network: Ipv4Network,
|
ipv4_network: Ipv4Network,
|
||||||
ipv6_network: Ipv6Network,
|
ipv6_network: Ipv6Network,
|
||||||
) -> Result<Self> {
|
) -> Result<Self> {
|
||||||
|
debug!("fetching state from xenstore");
|
||||||
let mut state = IpVendor::fetch_stored_state(&store).await?;
|
let mut state = IpVendor::fetch_stored_state(&store).await?;
|
||||||
|
debug!("allocating IP set");
|
||||||
let (gateway_ipv4, gateway_ipv6) =
|
let (gateway_ipv4, gateway_ipv6) =
|
||||||
IpVendor::allocate_ipset(&mut state, host_uuid, ipv4_network, ipv6_network)?;
|
IpVendor::allocate_ipset(&mut state, host_uuid, ipv4_network, ipv6_network)?;
|
||||||
let vend = IpVendor {
|
let vend = IpVendor {
|
||||||
@ -84,11 +86,14 @@ impl IpVendor {
|
|||||||
gateway_ipv6,
|
gateway_ipv6,
|
||||||
state: Arc::new(RwLock::new(state)),
|
state: Arc::new(RwLock::new(state)),
|
||||||
};
|
};
|
||||||
|
debug!("IP vendor initialized!");
|
||||||
Ok(vend)
|
Ok(vend)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn fetch_stored_state(store: &XsdClient) -> Result<IpVendorState> {
|
async fn fetch_stored_state(store: &XsdClient) -> Result<IpVendorState> {
|
||||||
|
debug!("initializing default IP vendor state");
|
||||||
let mut state = IpVendorState::default();
|
let mut state = IpVendorState::default();
|
||||||
|
debug!("iterating over xen domains");
|
||||||
for domid_candidate in store.list("/local/domain").await? {
|
for domid_candidate in store.list("/local/domain").await? {
|
||||||
let dom_path = format!("/local/domain/{}", domid_candidate);
|
let dom_path = format!("/local/domain/{}", domid_candidate);
|
||||||
let Some(uuid) = store
|
let Some(uuid) = store
|
||||||
@ -119,6 +124,7 @@ impl IpVendor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
debug!("IP state hydrated");
|
||||||
Ok(state)
|
Ok(state)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,19 +1,21 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::net::IpAddr;
|
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use advmac::MacAddr6;
|
use advmac::MacAddr6;
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use ipnetwork::IpNetwork;
|
use tokio::sync::Semaphore;
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
use krata::launchcfg::{
|
use krata::launchcfg::{
|
||||||
LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
|
LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
|
||||||
LaunchPackedFormat, LaunchRoot,
|
LaunchPackedFormat, LaunchRoot,
|
||||||
};
|
};
|
||||||
use krataoci::packer::OciPackedImage;
|
use krataoci::packer::OciPackedImage;
|
||||||
use tokio::sync::Semaphore;
|
pub use xenclient::{
|
||||||
use uuid::Uuid;
|
pci::PciBdf, DomainPciDevice as PciDevice, DomainPciRdmReservePolicy as PciRdmReservePolicy,
|
||||||
|
};
|
||||||
use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
|
use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
|
||||||
use xenplatform::domain::BaseDomainConfig;
|
use xenplatform::domain::BaseDomainConfig;
|
||||||
|
|
||||||
@ -22,24 +24,35 @@ use crate::RuntimeContext;
|
|||||||
|
|
||||||
use super::{ZoneInfo, ZoneState};
|
use super::{ZoneInfo, ZoneState};
|
||||||
|
|
||||||
pub use xenclient::{
|
|
||||||
pci::PciBdf, DomainPciDevice as PciDevice, DomainPciRdmReservePolicy as PciRdmReservePolicy,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub struct ZoneLaunchRequest {
|
pub struct ZoneLaunchRequest {
|
||||||
pub format: LaunchPackedFormat,
|
pub format: LaunchPackedFormat,
|
||||||
pub kernel: Vec<u8>,
|
pub kernel: Vec<u8>,
|
||||||
pub initrd: Vec<u8>,
|
pub initrd: Vec<u8>,
|
||||||
pub uuid: Option<Uuid>,
|
pub uuid: Option<Uuid>,
|
||||||
pub name: Option<String>,
|
pub name: Option<String>,
|
||||||
pub vcpus: u32,
|
pub target_cpus: u32,
|
||||||
pub mem: u64,
|
pub max_cpus: u32,
|
||||||
|
pub target_memory: u64,
|
||||||
|
pub max_memory: u64,
|
||||||
pub env: HashMap<String, String>,
|
pub env: HashMap<String, String>,
|
||||||
pub run: Option<Vec<String>>,
|
pub run: Option<Vec<String>>,
|
||||||
pub pcis: Vec<PciDevice>,
|
pub pcis: Vec<PciDevice>,
|
||||||
pub debug: bool,
|
pub kernel_verbose: bool,
|
||||||
|
pub kernel_cmdline_append: String,
|
||||||
pub image: OciPackedImage,
|
pub image: OciPackedImage,
|
||||||
pub addons_image: Option<PathBuf>,
|
pub addons_image: Option<PathBuf>,
|
||||||
|
pub network: ZoneLaunchNetwork,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ZoneLaunchNetwork {
|
||||||
|
pub ipv4: String,
|
||||||
|
pub ipv4_prefix: u8,
|
||||||
|
pub ipv6: String,
|
||||||
|
pub ipv6_prefix: u8,
|
||||||
|
pub gateway_ipv4: String,
|
||||||
|
pub gateway_ipv6: String,
|
||||||
|
pub zone_mac: MacAddr6,
|
||||||
|
pub nameservers: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct ZoneLauncher {
|
pub struct ZoneLauncher {
|
||||||
@ -58,15 +71,7 @@ impl ZoneLauncher {
|
|||||||
) -> Result<ZoneInfo> {
|
) -> Result<ZoneInfo> {
|
||||||
let uuid = request.uuid.unwrap_or_else(Uuid::new_v4);
|
let uuid = request.uuid.unwrap_or_else(Uuid::new_v4);
|
||||||
let xen_name = format!("krata-{uuid}");
|
let xen_name = format!("krata-{uuid}");
|
||||||
let mut gateway_mac = MacAddr6::random();
|
|
||||||
gateway_mac.set_local(true);
|
|
||||||
gateway_mac.set_multicast(false);
|
|
||||||
let mut zone_mac = MacAddr6::random();
|
|
||||||
zone_mac.set_local(true);
|
|
||||||
zone_mac.set_multicast(false);
|
|
||||||
|
|
||||||
let _launch_permit = self.launch_semaphore.acquire().await?;
|
let _launch_permit = self.launch_semaphore.acquire().await?;
|
||||||
let mut ip = context.ipvendor.assign(uuid).await?;
|
|
||||||
let launch_config = LaunchInfo {
|
let launch_config = LaunchInfo {
|
||||||
root: LaunchRoot {
|
root: LaunchRoot {
|
||||||
format: request.format.clone(),
|
format: request.format.clone(),
|
||||||
@ -81,20 +86,15 @@ impl ZoneLauncher {
|
|||||||
network: Some(LaunchNetwork {
|
network: Some(LaunchNetwork {
|
||||||
link: "eth0".to_string(),
|
link: "eth0".to_string(),
|
||||||
ipv4: LaunchNetworkIpv4 {
|
ipv4: LaunchNetworkIpv4 {
|
||||||
address: format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
|
address: format!("{}/{}", request.network.ipv4, request.network.ipv4_prefix),
|
||||||
gateway: ip.gateway_ipv4.to_string(),
|
gateway: request.network.gateway_ipv4,
|
||||||
},
|
},
|
||||||
ipv6: LaunchNetworkIpv6 {
|
ipv6: LaunchNetworkIpv6 {
|
||||||
address: format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
|
address: format!("{}/{}", request.network.ipv6, request.network.ipv6_prefix),
|
||||||
gateway: ip.gateway_ipv6.to_string(),
|
gateway: request.network.gateway_ipv6.to_string(),
|
||||||
},
|
},
|
||||||
resolver: LaunchNetworkResolver {
|
resolver: LaunchNetworkResolver {
|
||||||
nameservers: vec![
|
nameservers: request.network.nameservers,
|
||||||
"1.1.1.1".to_string(),
|
|
||||||
"1.0.0.1".to_string(),
|
|
||||||
"2606:4700:4700::1111".to_string(),
|
|
||||||
"2606:4700:4700::1001".to_string(),
|
|
||||||
],
|
|
||||||
},
|
},
|
||||||
}),
|
}),
|
||||||
env: request.env,
|
env: request.env,
|
||||||
@ -140,13 +140,17 @@ impl ZoneLauncher {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
let mut cmdline_options = ["console=hvc0"].to_vec();
|
let mut cmdline_options = ["console=hvc0"].to_vec();
|
||||||
if !request.debug {
|
if !request.kernel_verbose {
|
||||||
cmdline_options.push("quiet");
|
cmdline_options.push("quiet");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !request.kernel_cmdline_append.is_empty() {
|
||||||
|
cmdline_options.push(&request.kernel_cmdline_append);
|
||||||
|
}
|
||||||
|
|
||||||
let cmdline = cmdline_options.join(" ");
|
let cmdline = cmdline_options.join(" ");
|
||||||
|
|
||||||
let zone_mac_string = zone_mac.to_string().replace('-', ":");
|
let zone_mac_string = request.network.zone_mac.to_string().replace('-', ":");
|
||||||
let gateway_mac_string = gateway_mac.to_string().replace('-', ":");
|
|
||||||
|
|
||||||
let mut disks = vec![
|
let mut disks = vec![
|
||||||
DomainDisk {
|
DomainDisk {
|
||||||
@ -190,30 +194,6 @@ impl ZoneLauncher {
|
|||||||
let mut extra_keys = vec![
|
let mut extra_keys = vec![
|
||||||
("krata/uuid".to_string(), uuid.to_string()),
|
("krata/uuid".to_string(), uuid.to_string()),
|
||||||
("krata/loops".to_string(), loops.join(",")),
|
("krata/loops".to_string(), loops.join(",")),
|
||||||
(
|
|
||||||
"krata/network/zone/ipv4".to_string(),
|
|
||||||
format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"krata/network/zone/ipv6".to_string(),
|
|
||||||
format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"krata/network/zone/mac".to_string(),
|
|
||||||
zone_mac_string.clone(),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"krata/network/gateway/ipv4".to_string(),
|
|
||||||
format!("{}/{}", ip.gateway_ipv4, ip.ipv4_prefix),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"krata/network/gateway/ipv6".to_string(),
|
|
||||||
format!("{}/{}", ip.gateway_ipv6, ip.ipv6_prefix),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"krata/network/gateway/mac".to_string(),
|
|
||||||
gateway_mac_string.clone(),
|
|
||||||
),
|
|
||||||
];
|
];
|
||||||
|
|
||||||
if let Some(name) = request.name.as_ref() {
|
if let Some(name) = request.name.as_ref() {
|
||||||
@ -222,8 +202,10 @@ impl ZoneLauncher {
|
|||||||
|
|
||||||
let config = DomainConfig {
|
let config = DomainConfig {
|
||||||
base: BaseDomainConfig {
|
base: BaseDomainConfig {
|
||||||
max_vcpus: request.vcpus,
|
max_vcpus: request.max_cpus,
|
||||||
mem_mb: request.mem,
|
target_vcpus: request.target_cpus,
|
||||||
|
max_mem_mb: request.max_memory,
|
||||||
|
target_mem_mb: request.target_memory,
|
||||||
kernel: request.kernel,
|
kernel: request.kernel,
|
||||||
initrd: request.initrd,
|
initrd: request.initrd,
|
||||||
cmdline,
|
cmdline,
|
||||||
@ -251,29 +233,14 @@ impl ZoneLauncher {
|
|||||||
extra_rw_paths: vec!["krata/zone".to_string()],
|
extra_rw_paths: vec!["krata/zone".to_string()],
|
||||||
};
|
};
|
||||||
match context.xen.create(&config).await {
|
match context.xen.create(&config).await {
|
||||||
Ok(created) => {
|
Ok(created) => Ok(ZoneInfo {
|
||||||
ip.commit().await?;
|
name: request.name.as_ref().map(|x| x.to_string()),
|
||||||
Ok(ZoneInfo {
|
uuid,
|
||||||
name: request.name.as_ref().map(|x| x.to_string()),
|
domid: created.domid,
|
||||||
uuid,
|
image: request.image.digest,
|
||||||
domid: created.domid,
|
loops: vec![],
|
||||||
image: request.image.digest,
|
state: ZoneState { exit_code: None },
|
||||||
loops: vec![],
|
}),
|
||||||
zone_ipv4: Some(IpNetwork::new(IpAddr::V4(ip.ipv4), ip.ipv4_prefix)?),
|
|
||||||
zone_ipv6: Some(IpNetwork::new(IpAddr::V6(ip.ipv6), ip.ipv6_prefix)?),
|
|
||||||
zone_mac: Some(zone_mac_string.clone()),
|
|
||||||
gateway_ipv4: Some(IpNetwork::new(
|
|
||||||
IpAddr::V4(ip.gateway_ipv4),
|
|
||||||
ip.ipv4_prefix,
|
|
||||||
)?),
|
|
||||||
gateway_ipv6: Some(IpNetwork::new(
|
|
||||||
IpAddr::V6(ip.gateway_ipv6),
|
|
||||||
ip.ipv6_prefix,
|
|
||||||
)?),
|
|
||||||
gateway_mac: Some(gateway_mac_string.clone()),
|
|
||||||
state: ZoneState { exit_code: None },
|
|
||||||
})
|
|
||||||
}
|
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
let _ = context.autoloop.unloop(&image_squashfs_loop.path).await;
|
let _ = context.autoloop.unloop(&image_squashfs_loop.path).await;
|
||||||
let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path).await;
|
let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path).await;
|
||||||
|
@ -1,13 +1,12 @@
|
|||||||
use std::{fs, net::Ipv4Addr, path::PathBuf, str::FromStr, sync::Arc};
|
|
||||||
|
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use ip::IpVendor;
|
|
||||||
use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network};
|
|
||||||
use krataloopdev::LoopControl;
|
use krataloopdev::LoopControl;
|
||||||
use log::error;
|
use log::debug;
|
||||||
|
use std::{fs, path::PathBuf, str::FromStr, sync::Arc};
|
||||||
use tokio::sync::Semaphore;
|
use tokio::sync::Semaphore;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use xenclient::XenClient;
|
use xenclient::XenClient;
|
||||||
|
use xenplatform::domain::XEN_EXTRA_MEMORY_KB;
|
||||||
use xenstore::{XsdClient, XsdInterface};
|
use xenstore::{XsdClient, XsdInterface};
|
||||||
|
|
||||||
use self::{
|
use self::{
|
||||||
@ -19,7 +18,6 @@ use self::{
|
|||||||
pub mod autoloop;
|
pub mod autoloop;
|
||||||
pub mod cfgblk;
|
pub mod cfgblk;
|
||||||
pub mod channel;
|
pub mod channel;
|
||||||
pub mod ip;
|
|
||||||
pub mod launch;
|
pub mod launch;
|
||||||
pub mod power;
|
pub mod power;
|
||||||
|
|
||||||
@ -48,12 +46,6 @@ pub struct ZoneInfo {
|
|||||||
pub domid: u32,
|
pub domid: u32,
|
||||||
pub image: String,
|
pub image: String,
|
||||||
pub loops: Vec<ZoneLoopInfo>,
|
pub loops: Vec<ZoneLoopInfo>,
|
||||||
pub zone_ipv4: Option<IpNetwork>,
|
|
||||||
pub zone_ipv6: Option<IpNetwork>,
|
|
||||||
pub zone_mac: Option<String>,
|
|
||||||
pub gateway_ipv4: Option<IpNetwork>,
|
|
||||||
pub gateway_ipv6: Option<IpNetwork>,
|
|
||||||
pub gateway_mac: Option<String>,
|
|
||||||
pub state: ZoneState,
|
pub state: ZoneState,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -61,20 +53,14 @@ pub struct ZoneInfo {
|
|||||||
pub struct RuntimeContext {
|
pub struct RuntimeContext {
|
||||||
pub autoloop: AutoLoop,
|
pub autoloop: AutoLoop,
|
||||||
pub xen: XenClient<RuntimePlatform>,
|
pub xen: XenClient<RuntimePlatform>,
|
||||||
pub ipvendor: IpVendor,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RuntimeContext {
|
impl RuntimeContext {
|
||||||
pub async fn new(host_uuid: Uuid) -> Result<Self> {
|
pub async fn new() -> Result<Self> {
|
||||||
let xen = XenClient::new(0, RuntimePlatform::new()).await?;
|
let xen = XenClient::new(0, RuntimePlatform::new()).await?;
|
||||||
let ipv4_network = Ipv4Network::new(Ipv4Addr::new(10, 75, 80, 0), 24)?;
|
|
||||||
let ipv6_network = Ipv6Network::from_str("fdd4:1476:6c7e::/48")?;
|
|
||||||
let ipvend =
|
|
||||||
IpVendor::new(xen.store.clone(), host_uuid, ipv4_network, ipv6_network).await?;
|
|
||||||
Ok(RuntimeContext {
|
Ok(RuntimeContext {
|
||||||
autoloop: AutoLoop::new(LoopControl::open()?),
|
autoloop: AutoLoop::new(LoopControl::open()?),
|
||||||
xen,
|
xen,
|
||||||
ipvendor: ipvend,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,61 +101,6 @@ impl RuntimeContext {
|
|||||||
.store
|
.store
|
||||||
.read_string(&format!("{}/krata/loops", &dom_path))
|
.read_string(&format!("{}/krata/loops", &dom_path))
|
||||||
.await?;
|
.await?;
|
||||||
let zone_ipv4 = self
|
|
||||||
.xen
|
|
||||||
.store
|
|
||||||
.read_string(&format!("{}/krata/network/zone/ipv4", &dom_path))
|
|
||||||
.await?;
|
|
||||||
let zone_ipv6 = self
|
|
||||||
.xen
|
|
||||||
.store
|
|
||||||
.read_string(&format!("{}/krata/network/zone/ipv6", &dom_path))
|
|
||||||
.await?;
|
|
||||||
let zone_mac = self
|
|
||||||
.xen
|
|
||||||
.store
|
|
||||||
.read_string(&format!("{}/krata/network/zone/mac", &dom_path))
|
|
||||||
.await?;
|
|
||||||
let gateway_ipv4 = self
|
|
||||||
.xen
|
|
||||||
.store
|
|
||||||
.read_string(&format!("{}/krata/network/gateway/ipv4", &dom_path))
|
|
||||||
.await?;
|
|
||||||
let gateway_ipv6 = self
|
|
||||||
.xen
|
|
||||||
.store
|
|
||||||
.read_string(&format!("{}/krata/network/gateway/ipv6", &dom_path))
|
|
||||||
.await?;
|
|
||||||
let gateway_mac = self
|
|
||||||
.xen
|
|
||||||
.store
|
|
||||||
.read_string(&format!("{}/krata/network/gateway/mac", &dom_path))
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let zone_ipv4 = if let Some(zone_ipv4) = zone_ipv4 {
|
|
||||||
IpNetwork::from_str(&zone_ipv4).ok()
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let zone_ipv6 = if let Some(zone_ipv6) = zone_ipv6 {
|
|
||||||
IpNetwork::from_str(&zone_ipv6).ok()
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let gateway_ipv4 = if let Some(gateway_ipv4) = gateway_ipv4 {
|
|
||||||
IpNetwork::from_str(&gateway_ipv4).ok()
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let gateway_ipv6 = if let Some(gateway_ipv6) = gateway_ipv6 {
|
|
||||||
IpNetwork::from_str(&gateway_ipv6).ok()
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let exit_code = self
|
let exit_code = self
|
||||||
.xen
|
.xen
|
||||||
.store
|
.store
|
||||||
@ -190,12 +121,6 @@ impl RuntimeContext {
|
|||||||
domid,
|
domid,
|
||||||
image,
|
image,
|
||||||
loops,
|
loops,
|
||||||
zone_ipv4,
|
|
||||||
zone_ipv6,
|
|
||||||
zone_mac,
|
|
||||||
gateway_ipv4,
|
|
||||||
gateway_ipv6,
|
|
||||||
gateway_mac,
|
|
||||||
state,
|
state,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@ -237,16 +162,21 @@ impl RuntimeContext {
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct Runtime {
|
pub struct Runtime {
|
||||||
host_uuid: Uuid,
|
|
||||||
context: RuntimeContext,
|
context: RuntimeContext,
|
||||||
launch_semaphore: Arc<Semaphore>,
|
launch_semaphore: Arc<Semaphore>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Runtime {
|
impl Runtime {
|
||||||
pub async fn new(host_uuid: Uuid) -> Result<Self> {
|
pub async fn new() -> Result<Self> {
|
||||||
let context = RuntimeContext::new(host_uuid).await?;
|
let context = RuntimeContext::new().await?;
|
||||||
|
debug!("testing for hypervisor presence");
|
||||||
|
context
|
||||||
|
.xen
|
||||||
|
.call
|
||||||
|
.get_version_capabilities()
|
||||||
|
.await
|
||||||
|
.map_err(|_| anyhow!("hypervisor is not present"))?;
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
host_uuid,
|
|
||||||
context,
|
context,
|
||||||
launch_semaphore: Arc::new(Semaphore::new(10)),
|
launch_semaphore: Arc::new(Semaphore::new(10)),
|
||||||
})
|
})
|
||||||
@ -282,11 +212,6 @@ impl Runtime {
|
|||||||
return Err(anyhow!("unable to find krata uuid based on the domain",));
|
return Err(anyhow!("unable to find krata uuid based on the domain",));
|
||||||
}
|
}
|
||||||
let uuid = Uuid::parse_str(&uuid)?;
|
let uuid = Uuid::parse_str(&uuid)?;
|
||||||
let ip = self
|
|
||||||
.context
|
|
||||||
.ipvendor
|
|
||||||
.read_domain_assignment(uuid, domid)
|
|
||||||
.await?;
|
|
||||||
let loops = store
|
let loops = store
|
||||||
.read_string(format!("{}/krata/loops", dom_path).as_str())
|
.read_string(format!("{}/krata/loops", dom_path).as_str())
|
||||||
.await?;
|
.await?;
|
||||||
@ -306,17 +231,66 @@ impl Runtime {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(uuid)
|
||||||
|
}
|
||||||
|
|
||||||
if let Some(ip) = ip {
|
pub async fn set_memory_resources(
|
||||||
if let Err(error) = self.context.ipvendor.recall(&ip).await {
|
&self,
|
||||||
error!(
|
domid: u32,
|
||||||
"failed to recall ip assignment for zone {}: {}",
|
target_memory_bytes: u64,
|
||||||
uuid, error
|
max_memory_bytes: u64,
|
||||||
);
|
) -> Result<()> {
|
||||||
}
|
let mut max_memory_bytes = max_memory_bytes + (XEN_EXTRA_MEMORY_KB * 1024);
|
||||||
|
if target_memory_bytes > max_memory_bytes {
|
||||||
|
max_memory_bytes = target_memory_bytes + (XEN_EXTRA_MEMORY_KB * 1024);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(uuid)
|
self.context
|
||||||
|
.xen
|
||||||
|
.call
|
||||||
|
.set_max_mem(domid, max_memory_bytes / 1024)
|
||||||
|
.await?;
|
||||||
|
let domain_path = self.context.xen.store.get_domain_path(domid).await?;
|
||||||
|
let tx = self.context.xen.store.transaction().await?;
|
||||||
|
let max_memory_path = format!("{}/memory/static-max", domain_path);
|
||||||
|
tx.write_string(max_memory_path, &(max_memory_bytes / 1024).to_string())
|
||||||
|
.await?;
|
||||||
|
let target_memory_path = format!("{}/memory/target", domain_path);
|
||||||
|
tx.write_string(
|
||||||
|
target_memory_path,
|
||||||
|
&(target_memory_bytes / 1024).to_string(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
tx.commit().await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn set_cpu_resources(&self, domid: u32, target_cpus: u32) -> Result<()> {
|
||||||
|
let domain_path = self.context.xen.store.get_domain_path(domid).await?;
|
||||||
|
let cpus = self
|
||||||
|
.context
|
||||||
|
.xen
|
||||||
|
.store
|
||||||
|
.list(&format!("{}/cpu", domain_path))
|
||||||
|
.await?;
|
||||||
|
let tx = self.context.xen.store.transaction().await?;
|
||||||
|
for cpu in cpus {
|
||||||
|
let Some(id) = cpu.parse::<u32>().ok() else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let available = if id >= target_cpus {
|
||||||
|
"offline"
|
||||||
|
} else {
|
||||||
|
"online"
|
||||||
|
};
|
||||||
|
tx.write_string(
|
||||||
|
format!("{}/cpu/{}/availability", domain_path, id),
|
||||||
|
available,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
tx.commit().await?;
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn list(&self) -> Result<Vec<ZoneInfo>> {
|
pub async fn list(&self) -> Result<Vec<ZoneInfo>> {
|
||||||
@ -324,11 +298,23 @@ impl Runtime {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn dupe(&self) -> Result<Runtime> {
|
pub async fn dupe(&self) -> Result<Runtime> {
|
||||||
Runtime::new(self.host_uuid).await
|
Runtime::new().await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn power_management_context(&self) -> Result<PowerManagementContext> {
|
pub async fn power_management_context(&self) -> Result<PowerManagementContext> {
|
||||||
let context = RuntimeContext::new(self.host_uuid).await?;
|
let context = RuntimeContext::new().await?;
|
||||||
Ok(PowerManagementContext { context })
|
Ok(PowerManagementContext { context })
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn read_hypervisor_console(&self, clear: bool) -> Result<Arc<str>> {
|
||||||
|
let index = 0_u32;
|
||||||
|
let (rawbuf, newindex) = self
|
||||||
|
.context
|
||||||
|
.xen
|
||||||
|
.call
|
||||||
|
.read_console_ring_raw(clear, index)
|
||||||
|
.await?;
|
||||||
|
let buf = std::str::from_utf8(&rawbuf[..newindex as usize])?;
|
||||||
|
Ok(Arc::from(buf))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use indexmap::IndexMap;
|
use indexmap::IndexMap;
|
||||||
|
use log::info;
|
||||||
use xencall::sys::{CpuId, SysctlCputopo};
|
use xencall::sys::{CpuId, SysctlCputopo};
|
||||||
|
|
||||||
use crate::RuntimeContext;
|
use crate::RuntimeContext;
|
||||||
@ -151,7 +152,10 @@ impl PowerManagementContext {
|
|||||||
.xen
|
.xen
|
||||||
.call
|
.call
|
||||||
.set_turbo_mode(CpuId::All, enable)
|
.set_turbo_mode(CpuId::All, enable)
|
||||||
.await?;
|
.await
|
||||||
|
.unwrap_or_else(|error| {
|
||||||
|
info!("non-fatal error while setting SMT policy: {:?}", error);
|
||||||
|
});
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -161,7 +165,13 @@ impl PowerManagementContext {
|
|||||||
.xen
|
.xen
|
||||||
.call
|
.call
|
||||||
.set_cpufreq_gov(CpuId::All, policy)
|
.set_cpufreq_gov(CpuId::All, policy)
|
||||||
.await?;
|
.await
|
||||||
|
.unwrap_or_else(|error| {
|
||||||
|
info!(
|
||||||
|
"non-fatal error while setting scheduler policy: {:?}",
|
||||||
|
error
|
||||||
|
);
|
||||||
|
});
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
18
crates/xen/xencall/examples/console_read.rs
Normal file
18
crates/xen/xencall/examples/console_read.rs
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
use xencall::error::Result;
|
||||||
|
use xencall::XenCall;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<()> {
|
||||||
|
env_logger::init();
|
||||||
|
|
||||||
|
let call = XenCall::open(0)?;
|
||||||
|
let index = 0_u32;
|
||||||
|
let (buf, newindex) = call.read_console_ring_raw(false, index).await?;
|
||||||
|
|
||||||
|
match std::str::from_utf8(&buf[..newindex as usize]) {
|
||||||
|
Ok(v) => print!("{}", v),
|
||||||
|
_ => panic!("unable to decode Xen console messages"),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
@ -26,12 +26,12 @@ use std::sync::Arc;
|
|||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use sys::{
|
use sys::{
|
||||||
CpuId, E820Entry, ForeignMemoryMap, PhysdevMapPirq, Sysctl, SysctlCputopo, SysctlCputopoinfo,
|
CpuId, E820Entry, ForeignMemoryMap, PhysdevMapPirq, Sysctl, SysctlCputopo, SysctlCputopoinfo,
|
||||||
SysctlPhysinfo, SysctlPmOp, SysctlPmOpValue, SysctlSetCpuFreqGov, SysctlValue,
|
SysctlPhysinfo, SysctlPmOp, SysctlPmOpValue, SysctlReadconsole, SysctlSetCpuFreqGov,
|
||||||
VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP, HYPERVISOR_SYSCTL, PHYSDEVOP_MAP_PIRQ,
|
SysctlValue, VcpuGuestContextAny, HYPERVISOR_PHYSDEV_OP, HYPERVISOR_SYSCTL, PHYSDEVOP_MAP_PIRQ,
|
||||||
XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION, XEN_MEM_SET_MEMORY_MAP,
|
XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION, XEN_MEM_SET_MEMORY_MAP,
|
||||||
XEN_SYSCTL_CPUTOPOINFO, XEN_SYSCTL_MAX_INTERFACE_VERSION, XEN_SYSCTL_MIN_INTERFACE_VERSION,
|
XEN_SYSCTL_CPUTOPOINFO, XEN_SYSCTL_MAX_INTERFACE_VERSION, XEN_SYSCTL_MIN_INTERFACE_VERSION,
|
||||||
XEN_SYSCTL_PHYSINFO, XEN_SYSCTL_PM_OP, XEN_SYSCTL_PM_OP_DISABLE_TURBO,
|
XEN_SYSCTL_PHYSINFO, XEN_SYSCTL_PM_OP, XEN_SYSCTL_PM_OP_DISABLE_TURBO,
|
||||||
XEN_SYSCTL_PM_OP_ENABLE_TURBO,
|
XEN_SYSCTL_PM_OP_ENABLE_TURBO, XEN_SYSCTL_PM_OP_SET_CPUFREQ_GOV, XEN_SYSCTL_READCONSOLE,
|
||||||
};
|
};
|
||||||
use tokio::sync::Semaphore;
|
use tokio::sync::Semaphore;
|
||||||
use tokio::time::sleep;
|
use tokio::time::sleep;
|
||||||
@ -1038,7 +1038,7 @@ impl XenCall {
|
|||||||
interface_version: self.sysctl_interface_version,
|
interface_version: self.sysctl_interface_version,
|
||||||
value: SysctlValue {
|
value: SysctlValue {
|
||||||
pm_op: SysctlPmOp {
|
pm_op: SysctlPmOp {
|
||||||
cmd: XEN_SYSCTL_PM_OP_ENABLE_TURBO,
|
cmd: XEN_SYSCTL_PM_OP_SET_CPUFREQ_GOV,
|
||||||
cpuid,
|
cpuid,
|
||||||
value: SysctlPmOpValue {
|
value: SysctlPmOpValue {
|
||||||
set_gov: SysctlSetCpuFreqGov { scaling_governor },
|
set_gov: SysctlSetCpuFreqGov { scaling_governor },
|
||||||
@ -1087,4 +1087,33 @@ impl XenCall {
|
|||||||
.await?;
|
.await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn read_console_ring_raw(
|
||||||
|
&self,
|
||||||
|
clear: bool,
|
||||||
|
index: u32,
|
||||||
|
) -> Result<([u8; 16384], u32)> {
|
||||||
|
let mut u8buf = [0u8; 16384];
|
||||||
|
let mut sysctl = Sysctl {
|
||||||
|
cmd: XEN_SYSCTL_READCONSOLE,
|
||||||
|
interface_version: self.sysctl_interface_version,
|
||||||
|
value: SysctlValue {
|
||||||
|
console: SysctlReadconsole {
|
||||||
|
clear: clear as u8,
|
||||||
|
incremental: 1,
|
||||||
|
pad: 0,
|
||||||
|
index,
|
||||||
|
buffer: addr_of_mut!(u8buf) as u64,
|
||||||
|
count: 16384,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
self.hypercall1(HYPERVISOR_SYSCTL, addr_of_mut!(sysctl) as c_ulong)
|
||||||
|
.await?;
|
||||||
|
// Safety: We are passing a SysctlReadconsole struct as part of the hypercall, and
|
||||||
|
// calling the hypercall is known to not change the underlying value outside changing
|
||||||
|
// the values on some SysctlReadconsole fields.
|
||||||
|
let newindex = unsafe { sysctl.value.console.index };
|
||||||
|
Ok((u8buf, newindex))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -752,6 +752,7 @@ pub struct SysctlCputopoinfo {
|
|||||||
|
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
pub union SysctlValue {
|
pub union SysctlValue {
|
||||||
|
pub console: SysctlReadconsole,
|
||||||
pub cputopoinfo: SysctlCputopoinfo,
|
pub cputopoinfo: SysctlCputopoinfo,
|
||||||
pub pm_op: SysctlPmOp,
|
pub pm_op: SysctlPmOp,
|
||||||
pub phys_info: SysctlPhysinfo,
|
pub phys_info: SysctlPhysinfo,
|
||||||
@ -765,12 +766,14 @@ pub struct Sysctl {
|
|||||||
pub value: SysctlValue,
|
pub value: SysctlValue,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub const XEN_SYSCTL_READCONSOLE: u32 = 1;
|
||||||
pub const XEN_SYSCTL_PHYSINFO: u32 = 3;
|
pub const XEN_SYSCTL_PHYSINFO: u32 = 3;
|
||||||
pub const XEN_SYSCTL_PM_OP: u32 = 12;
|
pub const XEN_SYSCTL_PM_OP: u32 = 12;
|
||||||
pub const XEN_SYSCTL_CPUTOPOINFO: u32 = 16;
|
pub const XEN_SYSCTL_CPUTOPOINFO: u32 = 16;
|
||||||
|
|
||||||
pub const XEN_SYSCTL_MIN_INTERFACE_VERSION: u32 = 0x00000015;
|
pub const XEN_SYSCTL_MIN_INTERFACE_VERSION: u32 = 0x00000015;
|
||||||
pub const XEN_SYSCTL_MAX_INTERFACE_VERSION: u32 = 0x00000020;
|
pub const XEN_SYSCTL_MAX_INTERFACE_VERSION: u32 = 0x00000020;
|
||||||
|
pub const XEN_SYSCTL_PM_OP_SET_CPUFREQ_GOV: u32 = 0x12;
|
||||||
pub const XEN_SYSCTL_PM_OP_SET_SCHED_OPT_STMT: u32 = 0x21;
|
pub const XEN_SYSCTL_PM_OP_SET_SCHED_OPT_STMT: u32 = 0x21;
|
||||||
pub const XEN_SYSCTL_PM_OP_ENABLE_TURBO: u32 = 0x26;
|
pub const XEN_SYSCTL_PM_OP_ENABLE_TURBO: u32 = 0x26;
|
||||||
pub const XEN_SYSCTL_PM_OP_DISABLE_TURBO: u32 = 0x27;
|
pub const XEN_SYSCTL_PM_OP_DISABLE_TURBO: u32 = 0x27;
|
||||||
@ -801,3 +804,14 @@ pub struct SysctlPhysinfo {
|
|||||||
pub max_mfn: u64,
|
pub max_mfn: u64,
|
||||||
pub hw_cap: [u32; 8],
|
pub hw_cap: [u32; 8],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
#[derive(Clone, Copy, Debug, Default)]
|
||||||
|
pub struct SysctlReadconsole {
|
||||||
|
pub clear: u8,
|
||||||
|
pub incremental: u8,
|
||||||
|
pub pad: u16,
|
||||||
|
pub index: u32,
|
||||||
|
pub buffer: u64,
|
||||||
|
pub count: u32,
|
||||||
|
}
|
||||||
|
@ -13,9 +13,9 @@ async-trait = { workspace = true }
|
|||||||
indexmap = { workspace = true }
|
indexmap = { workspace = true }
|
||||||
libc = { workspace = true }
|
libc = { workspace = true }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
krata-xencall = { path = "../xencall", version = "^0.0.14" }
|
krata-xencall = { path = "../xencall", version = "^0.0.20" }
|
||||||
krata-xenplatform = { path = "../xenplatform", version = "^0.0.14" }
|
krata-xenplatform = { path = "../xenplatform", version = "^0.0.20" }
|
||||||
krata-xenstore = { path = "../xenstore", version = "^0.0.14" }
|
krata-xenstore = { path = "../xenstore", version = "^0.0.20" }
|
||||||
regex = { workspace = true }
|
regex = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
|
@ -27,7 +27,9 @@ async fn main() -> Result<()> {
|
|||||||
base: BaseDomainConfig {
|
base: BaseDomainConfig {
|
||||||
uuid: Uuid::new_v4(),
|
uuid: Uuid::new_v4(),
|
||||||
max_vcpus: 1,
|
max_vcpus: 1,
|
||||||
mem_mb: 512,
|
target_vcpus: 1,
|
||||||
|
max_mem_mb: 512,
|
||||||
|
target_mem_mb: 512,
|
||||||
enable_iommu: true,
|
enable_iommu: true,
|
||||||
kernel: fs::read(&kernel_image_path).await?,
|
kernel: fs::read(&kernel_image_path).await?,
|
||||||
initrd: fs::read(&initrd_path).await?,
|
initrd: fs::read(&initrd_path).await?,
|
||||||
|
@ -130,8 +130,7 @@ impl<P: BootSetupPlatform> XenClient<P> {
|
|||||||
match self.init(created.domid, config, &created).await {
|
match self.init(created.domid, config, &created).await {
|
||||||
Ok(_) => Ok(created),
|
Ok(_) => Ok(created),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
// ignore since destroying a domain is best
|
// ignore since destroying a domain is best-effort when an error occurs
|
||||||
// effort when an error occurs
|
|
||||||
let _ = self.domain_manager.destroy(created.domid).await;
|
let _ = self.domain_manager.destroy(created.domid).await;
|
||||||
Err(err)
|
Err(err)
|
||||||
}
|
}
|
||||||
|
@ -156,13 +156,13 @@ impl ClientTransaction {
|
|||||||
self.tx
|
self.tx
|
||||||
.write_string(
|
.write_string(
|
||||||
format!("{}/memory/static-max", self.dom_path).as_str(),
|
format!("{}/memory/static-max", self.dom_path).as_str(),
|
||||||
&(base.mem_mb * 1024).to_string(),
|
&(base.max_mem_mb * 1024).to_string(),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
self.tx
|
self.tx
|
||||||
.write_string(
|
.write_string(
|
||||||
format!("{}/memory/target", self.dom_path).as_str(),
|
format!("{}/memory/target", self.dom_path).as_str(),
|
||||||
&(base.mem_mb * 1024).to_string(),
|
&(base.target_mem_mb * 1024).to_string(),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
self.tx
|
self.tx
|
||||||
@ -194,7 +194,16 @@ impl ClientTransaction {
|
|||||||
self.tx.mkdir(&path).await?;
|
self.tx.mkdir(&path).await?;
|
||||||
self.tx.set_perms(&path, ro_perm).await?;
|
self.tx.set_perms(&path, ro_perm).await?;
|
||||||
let path = format!("{}/cpu/{}/availability", self.dom_path, i);
|
let path = format!("{}/cpu/{}/availability", self.dom_path, i);
|
||||||
self.tx.write_string(&path, "online").await?;
|
self.tx
|
||||||
|
.write_string(
|
||||||
|
&path,
|
||||||
|
if i < base.target_vcpus {
|
||||||
|
"online"
|
||||||
|
} else {
|
||||||
|
"offline"
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
self.tx.set_perms(&path, ro_perm).await?;
|
self.tx.set_perms(&path, ro_perm).await?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -9,6 +9,7 @@ edition = "2021"
|
|||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
byteorder = { workspace = true }
|
||||||
libc = { workspace = true }
|
libc = { workspace = true }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
|
@ -1,9 +1,9 @@
|
|||||||
use xenevtchn::error::Result;
|
use xenevtchn::error::Result;
|
||||||
use xenevtchn::EventChannel;
|
use xenevtchn::EventChannelService;
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() -> Result<()> {
|
async fn main() -> Result<()> {
|
||||||
let channel = EventChannel::open().await?;
|
let channel = EventChannelService::open().await?;
|
||||||
println!("channel opened");
|
println!("channel opened");
|
||||||
let port = channel.bind_unbound_port(0).await?;
|
let port = channel.bind_unbound_port(0).await?;
|
||||||
println!("port: {}", port);
|
println!("port: {}", port);
|
||||||
|
@ -8,6 +8,10 @@ pub enum Error {
|
|||||||
Io(#[from] io::Error),
|
Io(#[from] io::Error),
|
||||||
#[error("failed to send event channel wake: {0}")]
|
#[error("failed to send event channel wake: {0}")]
|
||||||
WakeSend(tokio::sync::broadcast::error::SendError<u32>),
|
WakeSend(tokio::sync::broadcast::error::SendError<u32>),
|
||||||
|
#[error("failed to acquire lock")]
|
||||||
|
LockAcquireFailed,
|
||||||
|
#[error("event port already in use")]
|
||||||
|
PortInUse,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type Result<T> = std::result::Result<T, Error>;
|
pub type Result<T> = std::result::Result<T, Error>;
|
||||||
|
@ -1,82 +1,77 @@
|
|||||||
pub mod error;
|
pub mod error;
|
||||||
|
pub mod raw;
|
||||||
pub mod sys;
|
pub mod sys;
|
||||||
|
|
||||||
use crate::error::{Error, Result};
|
use crate::error::{Error, Result};
|
||||||
use crate::sys::{BindInterdomain, BindUnboundPort, BindVirq, Notify, UnbindPort};
|
use crate::sys::{BindInterdomain, BindUnboundPort, BindVirq, Notify, UnbindPort};
|
||||||
|
|
||||||
|
use crate::raw::EVENT_CHANNEL_DEVICE;
|
||||||
|
use byteorder::{LittleEndian, ReadBytesExt};
|
||||||
use log::error;
|
use log::error;
|
||||||
use std::collections::hash_map::Entry;
|
use std::collections::hash_map::Entry;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::mem::size_of;
|
use std::mem::size_of;
|
||||||
use std::os::fd::AsRawFd;
|
use std::os::fd::AsRawFd;
|
||||||
use std::os::raw::c_void;
|
use std::os::raw::c_void;
|
||||||
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tokio::fs::{File, OpenOptions};
|
use tokio::fs::{File, OpenOptions};
|
||||||
use tokio::io::AsyncReadExt;
|
|
||||||
use tokio::select;
|
|
||||||
use tokio::sync::broadcast::{
|
|
||||||
channel as broadcast_channel, Receiver as BroadcastReceiver, Sender as BroadastSender,
|
|
||||||
};
|
|
||||||
use tokio::sync::mpsc::{channel, Receiver, Sender};
|
use tokio::sync::mpsc::{channel, Receiver, Sender};
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::{Mutex, RwLock};
|
||||||
use tokio::task::JoinHandle;
|
|
||||||
|
|
||||||
const UNBIND_CHANNEL_QUEUE_LEN: usize = 30;
|
const CHANNEL_QUEUE_LEN: usize = 30;
|
||||||
const UNMASK_CHANNEL_QUEUE_LEN: usize = 30;
|
|
||||||
const BROADCAST_CHANNEL_QUEUE_LEN: usize = 30;
|
|
||||||
|
|
||||||
type WakeMap = Arc<Mutex<HashMap<u32, BroadastSender<u32>>>>;
|
type WakeMap = Arc<RwLock<HashMap<u32, Sender<u32>>>>;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct EventChannel {
|
pub struct EventChannelService {
|
||||||
handle: Arc<Mutex<File>>,
|
handle: Arc<Mutex<File>>,
|
||||||
wakes: WakeMap,
|
wakes: WakeMap,
|
||||||
unbind_sender: Sender<u32>,
|
process_flag: Arc<AtomicBool>,
|
||||||
unmask_sender: Sender<u32>,
|
|
||||||
task: Arc<JoinHandle<()>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct BoundEventChannel {
|
pub struct BoundEventChannel {
|
||||||
pub local_port: u32,
|
pub local_port: u32,
|
||||||
pub receiver: BroadcastReceiver<u32>,
|
pub receiver: Receiver<u32>,
|
||||||
unbind_sender: Sender<u32>,
|
pub service: EventChannelService,
|
||||||
pub unmask_sender: Sender<u32>,
|
}
|
||||||
|
|
||||||
|
impl BoundEventChannel {
|
||||||
|
pub async fn unmask(&self) -> Result<()> {
|
||||||
|
self.service.unmask(self.local_port).await
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for BoundEventChannel {
|
impl Drop for BoundEventChannel {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
let _ = self.unbind_sender.try_send(self.local_port);
|
let service = self.service.clone();
|
||||||
|
let port = self.local_port;
|
||||||
|
tokio::task::spawn(async move {
|
||||||
|
let _ = service.unbind(port).await;
|
||||||
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EventChannel {
|
impl EventChannelService {
|
||||||
pub async fn open() -> Result<EventChannel> {
|
pub async fn open() -> Result<EventChannelService> {
|
||||||
let file = OpenOptions::new()
|
let handle = OpenOptions::new()
|
||||||
.read(true)
|
.read(true)
|
||||||
.write(true)
|
.write(true)
|
||||||
.open("/dev/xen/evtchn")
|
.open(EVENT_CHANNEL_DEVICE)
|
||||||
.await?;
|
.await?;
|
||||||
|
let wakes = Arc::new(RwLock::new(HashMap::new()));
|
||||||
let wakes = Arc::new(Mutex::new(HashMap::new()));
|
let flag = Arc::new(AtomicBool::new(false));
|
||||||
let (unbind_sender, unbind_receiver) = channel(UNBIND_CHANNEL_QUEUE_LEN);
|
let processor = EventChannelProcessor {
|
||||||
let (unmask_sender, unmask_receiver) = channel(UNMASK_CHANNEL_QUEUE_LEN);
|
flag: flag.clone(),
|
||||||
let task = {
|
handle: handle.try_clone().await?.into_std().await,
|
||||||
let file = file.try_clone().await?;
|
wakes: wakes.clone(),
|
||||||
let wakes = wakes.clone();
|
|
||||||
tokio::task::spawn(async move {
|
|
||||||
if let Err(error) =
|
|
||||||
EventChannel::process(file, wakes, unmask_receiver, unbind_receiver).await
|
|
||||||
{
|
|
||||||
error!("event channel processor failed: {}", error);
|
|
||||||
}
|
|
||||||
})
|
|
||||||
};
|
};
|
||||||
Ok(EventChannel {
|
processor.launch()?;
|
||||||
handle: Arc::new(Mutex::new(file)),
|
|
||||||
|
Ok(EventChannelService {
|
||||||
|
handle: Arc::new(Mutex::new(handle)),
|
||||||
wakes,
|
wakes,
|
||||||
unbind_sender,
|
process_flag: flag,
|
||||||
unmask_sender,
|
|
||||||
task: Arc::new(task),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -109,11 +104,29 @@ impl EventChannel {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn unmask(&self, port: u32) -> Result<()> {
|
||||||
|
let handle = self.handle.lock().await;
|
||||||
|
let mut port = port;
|
||||||
|
let result = unsafe {
|
||||||
|
libc::write(
|
||||||
|
handle.as_raw_fd(),
|
||||||
|
&mut port as *mut u32 as *mut c_void,
|
||||||
|
size_of::<u32>(),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
if result != size_of::<u32>() as isize {
|
||||||
|
return Err(Error::Io(std::io::Error::from_raw_os_error(result as i32)));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn unbind(&self, port: u32) -> Result<u32> {
|
pub async fn unbind(&self, port: u32) -> Result<u32> {
|
||||||
let handle = self.handle.lock().await;
|
let handle = self.handle.lock().await;
|
||||||
unsafe {
|
unsafe {
|
||||||
let mut request = UnbindPort { port };
|
let mut request = UnbindPort { port };
|
||||||
Ok(sys::unbind(handle.as_raw_fd(), &mut request)? as u32)
|
let result = sys::unbind(handle.as_raw_fd(), &mut request)? as u32;
|
||||||
|
self.wakes.write().await.remove(&port);
|
||||||
|
Ok(result)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -132,95 +145,66 @@ impl EventChannel {
|
|||||||
|
|
||||||
pub async fn bind(&self, domid: u32, port: u32) -> Result<BoundEventChannel> {
|
pub async fn bind(&self, domid: u32, port: u32) -> Result<BoundEventChannel> {
|
||||||
let local_port = self.bind_interdomain(domid, port).await?;
|
let local_port = self.bind_interdomain(domid, port).await?;
|
||||||
let (receiver, unmask_sender) = self.subscribe(local_port).await?;
|
let receiver = self.subscribe(local_port).await?;
|
||||||
let bound = BoundEventChannel {
|
let bound = BoundEventChannel {
|
||||||
local_port,
|
local_port,
|
||||||
receiver,
|
receiver,
|
||||||
unbind_sender: self.unbind_sender.clone(),
|
service: self.clone(),
|
||||||
unmask_sender,
|
|
||||||
};
|
};
|
||||||
Ok(bound)
|
Ok(bound)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn subscribe(&self, port: u32) -> Result<(BroadcastReceiver<u32>, Sender<u32>)> {
|
pub async fn subscribe(&self, port: u32) -> Result<Receiver<u32>> {
|
||||||
let mut wakes = self.wakes.lock().await;
|
let mut wakes = self.wakes.write().await;
|
||||||
let receiver = match wakes.entry(port) {
|
let receiver = match wakes.entry(port) {
|
||||||
Entry::Occupied(entry) => entry.get().subscribe(),
|
Entry::Occupied(_) => {
|
||||||
|
return Err(Error::PortInUse);
|
||||||
|
}
|
||||||
|
|
||||||
Entry::Vacant(entry) => {
|
Entry::Vacant(entry) => {
|
||||||
let (sender, receiver) = broadcast_channel::<u32>(BROADCAST_CHANNEL_QUEUE_LEN);
|
let (sender, receiver) = channel::<u32>(CHANNEL_QUEUE_LEN);
|
||||||
entry.insert(sender);
|
entry.insert(sender);
|
||||||
receiver
|
receiver
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
Ok((receiver, self.unmask_sender.clone()))
|
Ok(receiver)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
async fn process(
|
pub struct EventChannelProcessor {
|
||||||
mut file: File,
|
flag: Arc<AtomicBool>,
|
||||||
wakers: WakeMap,
|
handle: std::fs::File,
|
||||||
mut unmask_receiver: Receiver<u32>,
|
wakes: WakeMap,
|
||||||
mut unbind_receiver: Receiver<u32>,
|
}
|
||||||
) -> Result<()> {
|
|
||||||
loop {
|
|
||||||
select! {
|
|
||||||
result = file.read_u32_le() => {
|
|
||||||
match result {
|
|
||||||
Ok(port) => {
|
|
||||||
if let Some(sender) = wakers.lock().await.get(&port) {
|
|
||||||
if let Err(error) = sender.send(port) {
|
|
||||||
return Err(Error::WakeSend(error));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Err(error) => return Err(Error::Io(error))
|
impl EventChannelProcessor {
|
||||||
}
|
pub fn launch(mut self) -> Result<()> {
|
||||||
|
std::thread::spawn(move || {
|
||||||
|
while let Err(error) = self.process() {
|
||||||
|
if self.flag.load(Ordering::Acquire) {
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
error!("failed to process event channel wakes: {}", error);
|
||||||
result = unmask_receiver.recv() => {
|
}
|
||||||
match result {
|
});
|
||||||
Some(port) => {
|
|
||||||
unsafe {
|
|
||||||
let mut port = port;
|
|
||||||
let result = libc::write(file.as_raw_fd(), &mut port as *mut u32 as *mut c_void, size_of::<u32>());
|
|
||||||
if result != size_of::<u32>() as isize {
|
|
||||||
return Err(Error::Io(std::io::Error::from_raw_os_error(result as i32)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
None => {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
result = unbind_receiver.recv() => {
|
|
||||||
match result {
|
|
||||||
Some(port) => {
|
|
||||||
unsafe {
|
|
||||||
let mut request = UnbindPort { port };
|
|
||||||
sys::unbind(file.as_raw_fd(), &mut request)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
None => {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for EventChannel {
|
pub fn process(&mut self) -> Result<()> {
|
||||||
fn drop(&mut self) {
|
loop {
|
||||||
if Arc::strong_count(&self.task) <= 1 {
|
let port = self.handle.read_u32::<LittleEndian>()?;
|
||||||
self.task.abort();
|
if let Some(wake) = self.wakes.blocking_read().get(&port) {
|
||||||
|
let _ = wake.try_send(port);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for EventChannelService {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if Arc::strong_count(&self.handle) <= 1 {
|
||||||
|
self.process_flag.store(true, Ordering::Release);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
84
crates/xen/xenevtchn/src/raw.rs
Normal file
84
crates/xen/xenevtchn/src/raw.rs
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
use std::fs::{File, OpenOptions};
|
||||||
|
use std::os::fd::AsRawFd;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use byteorder::{LittleEndian, ReadBytesExt};
|
||||||
|
|
||||||
|
use crate::error::{Error, Result};
|
||||||
|
use crate::sys;
|
||||||
|
|
||||||
|
pub const EVENT_CHANNEL_DEVICE: &str = "/dev/xen/evtchn";
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct RawEventChannelService {
|
||||||
|
handle: Arc<Mutex<File>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RawEventChannelService {
|
||||||
|
pub fn open() -> Result<RawEventChannelService> {
|
||||||
|
let handle = OpenOptions::new()
|
||||||
|
.read(true)
|
||||||
|
.write(true)
|
||||||
|
.open(EVENT_CHANNEL_DEVICE)?;
|
||||||
|
let handle = Arc::new(Mutex::new(handle));
|
||||||
|
Ok(RawEventChannelService { handle })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_handle(handle: File) -> Result<RawEventChannelService> {
|
||||||
|
Ok(RawEventChannelService {
|
||||||
|
handle: Arc::new(Mutex::new(handle)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn bind_virq(&self, virq: u32) -> Result<u32> {
|
||||||
|
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
|
||||||
|
let mut request = sys::BindVirq { virq };
|
||||||
|
Ok(unsafe { sys::bind_virq(handle.as_raw_fd(), &mut request)? as u32 })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn bind_interdomain(&self, domid: u32, port: u32) -> Result<u32> {
|
||||||
|
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
|
||||||
|
let mut request = sys::BindInterdomain {
|
||||||
|
remote_domain: domid,
|
||||||
|
remote_port: port,
|
||||||
|
};
|
||||||
|
Ok(unsafe { sys::bind_interdomain(handle.as_raw_fd(), &mut request)? as u32 })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn bind_unbound_port(&self, domid: u32) -> Result<u32> {
|
||||||
|
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
|
||||||
|
let mut request = sys::BindUnboundPort {
|
||||||
|
remote_domain: domid,
|
||||||
|
};
|
||||||
|
Ok(unsafe { sys::bind_unbound_port(handle.as_raw_fd(), &mut request)? as u32 })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn unbind(&self, port: u32) -> Result<u32> {
|
||||||
|
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
|
||||||
|
let mut request = sys::UnbindPort { port };
|
||||||
|
Ok(unsafe { sys::unbind(handle.as_raw_fd(), &mut request)? as u32 })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn notify(&self, port: u32) -> Result<u32> {
|
||||||
|
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
|
||||||
|
let mut request = sys::Notify { port };
|
||||||
|
Ok(unsafe { sys::notify(handle.as_raw_fd(), &mut request)? as u32 })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reset(&self) -> Result<u32> {
|
||||||
|
let handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
|
||||||
|
Ok(unsafe { sys::reset(handle.as_raw_fd())? as u32 })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn pending(&self) -> Result<u32> {
|
||||||
|
let mut handle = self.handle.lock().map_err(|_| Error::LockAcquireFailed)?;
|
||||||
|
Ok(handle.read_u32::<LittleEndian>()?)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn into_handle(self) -> Result<File> {
|
||||||
|
Arc::into_inner(self.handle)
|
||||||
|
.ok_or(Error::LockAcquireFailed)?
|
||||||
|
.into_inner()
|
||||||
|
.map_err(|_| Error::LockAcquireFailed)
|
||||||
|
}
|
||||||
|
}
|
@ -16,7 +16,7 @@ flate2 = { workspace = true }
|
|||||||
indexmap = { workspace = true }
|
indexmap = { workspace = true }
|
||||||
libc = { workspace = true }
|
libc = { workspace = true }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
krata-xencall = { path = "../xencall", version = "^0.0.14" }
|
krata-xencall = { path = "../xencall", version = "^0.0.20" }
|
||||||
memchr = { workspace = true }
|
memchr = { workspace = true }
|
||||||
nix = { workspace = true }
|
nix = { workspace = true }
|
||||||
regex = { workspace = true }
|
regex = { workspace = true }
|
||||||
|
@ -162,11 +162,13 @@ impl<I: BootImageLoader, P: BootSetupPlatform> BootSetup<I, P> {
|
|||||||
pub async fn initialize(
|
pub async fn initialize(
|
||||||
&mut self,
|
&mut self,
|
||||||
initrd: &[u8],
|
initrd: &[u8],
|
||||||
mem_mb: u64,
|
target_mem_mb: u64,
|
||||||
|
max_mem_mb: u64,
|
||||||
max_vcpus: u32,
|
max_vcpus: u32,
|
||||||
cmdline: &str,
|
cmdline: &str,
|
||||||
) -> Result<BootDomain> {
|
) -> Result<BootDomain> {
|
||||||
let total_pages = mem_mb << (20 - self.platform.page_shift());
|
let target_pages = target_mem_mb << (20 - self.platform.page_shift());
|
||||||
|
let total_pages = max_mem_mb << (20 - self.platform.page_shift());
|
||||||
let image_info = self.image_loader.parse(self.platform.hvm()).await?;
|
let image_info = self.image_loader.parse(self.platform.hvm()).await?;
|
||||||
let mut domain = BootDomain {
|
let mut domain = BootDomain {
|
||||||
domid: self.domid,
|
domid: self.domid,
|
||||||
@ -175,7 +177,7 @@ impl<I: BootImageLoader, P: BootSetupPlatform> BootSetup<I, P> {
|
|||||||
virt_pgtab_end: 0,
|
virt_pgtab_end: 0,
|
||||||
pfn_alloc_end: 0,
|
pfn_alloc_end: 0,
|
||||||
total_pages,
|
total_pages,
|
||||||
target_pages: total_pages,
|
target_pages,
|
||||||
page_size: self.platform.page_size(),
|
page_size: self.platform.page_size(),
|
||||||
image_info,
|
image_info,
|
||||||
console_evtchn: 0,
|
console_evtchn: 0,
|
||||||
|
@ -9,6 +9,8 @@ use xencall::XenCall;
|
|||||||
|
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
|
|
||||||
|
pub const XEN_EXTRA_MEMORY_KB: u64 = 2048;
|
||||||
|
|
||||||
pub struct BaseDomainManager<P: BootSetupPlatform> {
|
pub struct BaseDomainManager<P: BootSetupPlatform> {
|
||||||
call: XenCall,
|
call: XenCall,
|
||||||
pub platform: Arc<P>,
|
pub platform: Arc<P>,
|
||||||
@ -29,7 +31,7 @@ impl<P: BootSetupPlatform> BaseDomainManager<P> {
|
|||||||
let domid = self.call.create_domain(domain).await?;
|
let domid = self.call.create_domain(domain).await?;
|
||||||
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
|
self.call.set_max_vcpus(domid, config.max_vcpus).await?;
|
||||||
self.call
|
self.call
|
||||||
.set_max_mem(domid, (config.mem_mb * 1024) + 2048)
|
.set_max_mem(domid, (config.max_mem_mb * 1024) + XEN_EXTRA_MEMORY_KB)
|
||||||
.await?;
|
.await?;
|
||||||
let loader = ElfImageLoader::load_file_kernel(&config.kernel)?;
|
let loader = ElfImageLoader::load_file_kernel(&config.kernel)?;
|
||||||
let platform = (*self.platform).clone();
|
let platform = (*self.platform).clone();
|
||||||
@ -37,7 +39,8 @@ impl<P: BootSetupPlatform> BaseDomainManager<P> {
|
|||||||
let mut domain = boot
|
let mut domain = boot
|
||||||
.initialize(
|
.initialize(
|
||||||
&config.initrd,
|
&config.initrd,
|
||||||
config.mem_mb,
|
config.target_mem_mb,
|
||||||
|
config.max_mem_mb,
|
||||||
config.max_vcpus,
|
config.max_vcpus,
|
||||||
&config.cmdline,
|
&config.cmdline,
|
||||||
)
|
)
|
||||||
@ -63,7 +66,9 @@ pub struct BaseDomainConfig {
|
|||||||
pub uuid: Uuid,
|
pub uuid: Uuid,
|
||||||
pub owner_domid: u32,
|
pub owner_domid: u32,
|
||||||
pub max_vcpus: u32,
|
pub max_vcpus: u32,
|
||||||
pub mem_mb: u64,
|
pub target_vcpus: u32,
|
||||||
|
pub max_mem_mb: u64,
|
||||||
|
pub target_mem_mb: u64,
|
||||||
pub kernel: Vec<u8>,
|
pub kernel: Vec<u8>,
|
||||||
pub initrd: Vec<u8>,
|
pub initrd: Vec<u8>,
|
||||||
pub cmdline: String,
|
pub cmdline: String,
|
||||||
|
@ -116,9 +116,11 @@ impl XsdSocket {
|
|||||||
let rx_task = std::thread::Builder::new()
|
let rx_task = std::thread::Builder::new()
|
||||||
.name("xenstore-reader".to_string())
|
.name("xenstore-reader".to_string())
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
if let Err(error) = XsdSocketProcessor::process_rx(read, rx_sender) {
|
let mut read = read;
|
||||||
|
if let Err(error) = XsdSocketProcessor::process_rx(&mut read, rx_sender) {
|
||||||
debug!("failed to process xen store bus: {}", error);
|
debug!("failed to process xen store bus: {}", error);
|
||||||
}
|
}
|
||||||
|
std::mem::forget(read);
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
Ok(XsdSocket {
|
Ok(XsdSocket {
|
||||||
@ -197,12 +199,11 @@ struct XsdSocketProcessor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl XsdSocketProcessor {
|
impl XsdSocketProcessor {
|
||||||
fn process_rx(mut read: std::fs::File, rx_sender: Sender<XsdMessage>) -> Result<()> {
|
fn process_rx(read: &mut std::fs::File, rx_sender: Sender<XsdMessage>) -> Result<()> {
|
||||||
let mut header_buffer: Vec<u8> = vec![0u8; XsdMessageHeader::SIZE];
|
let mut header_buffer: Vec<u8> = vec![0u8; XsdMessageHeader::SIZE];
|
||||||
let mut buffer: Vec<u8> = vec![0u8; XEN_BUS_MAX_PACKET_SIZE - XsdMessageHeader::SIZE];
|
let mut buffer: Vec<u8> = vec![0u8; XEN_BUS_MAX_PACKET_SIZE - XsdMessageHeader::SIZE];
|
||||||
loop {
|
loop {
|
||||||
let message =
|
let message = XsdSocketProcessor::read_message(&mut header_buffer, &mut buffer, read)?;
|
||||||
XsdSocketProcessor::read_message(&mut header_buffer, &mut buffer, &mut read)?;
|
|
||||||
rx_sender.blocking_send(message)?;
|
rx_sender.blocking_send(message)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -297,7 +298,7 @@ impl XsdSocketProcessor {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user