Mirror of https://github.com/edera-dev/krata.git (synced 2025-08-03 21:21:32 +00:00)

Compare commits: 19 commits

398e555bd3, 75901233b1, 04665ce690, 481a5884d9, 5ee1035896, 9bd8d1bb1d,
3bada811b2, e08d25ebde, 2c884a6882, d756fa82f4, 6e051f52b9, b2fba6400e,
b26469be28, 28d63d7d70, 6b91f0be94, 9e91ffe065, b57d95c610, de6bfe38fe,
f6dffd6e17
.github/dependabot.yml (vendored): 14 lines changed

The update groups are renamed from production-version-updates / development-version-updates to dep-updates / dev-updates in every package ecosystem, and the docker ecosystem directory changes from "/" to "/images".

@@ -5,10 +5,10 @@ updates:
     schedule:
       interval: "daily"
     groups:
-      production-version-updates:
+      dep-updates:
         dependency-type: "production"
         applies-to: "version-updates"
-      development-version-updates:
+      dev-updates:
         dependency-type: "development"
         applies-to: "version-updates"
   - package-ecosystem: "cargo"
@@ -16,20 +16,20 @@ updates:
     schedule:
       interval: "daily"
     groups:
-      production-version-updates:
+      dep-updates:
         dependency-type: "production"
         applies-to: "version-updates"
-      development-version-updates:
+      dev-updates:
         dependency-type: "development"
         applies-to: "version-updates"
   - package-ecosystem: "docker"
-    directory: "/"
+    directory: "/images"
     schedule:
       interval: "daily"
     groups:
-      production-version-updates:
+      dep-updates:
         dependency-type: "production"
         applies-to: "version-updates"
-      development-version-updates:
+      dev-updates:
         dependency-type: "development"
         applies-to: "version-updates"
.github/workflows/check.yml (vendored): 190 lines changed (@@ -7,30 +7,196 @@ on:)

- The fmt job is renamed to rustfmt, and every step in the rustfmt and shellcheck jobs now carries an explicit name (harden runner, checkout repository, and so on).
- step-security/harden-runner is bumped from 17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 (v2.8.1) to 0d381219ddf674d61a7572ddd19d7941e271515c (v2.9.0) in every job.
- The dtolnay/rust-toolchain action is replaced by explicit rustup commands (rustup update --no-self-update stable, rustup default stable, rustup component add rustfmt). The cargo fmt check still runs ./hack/build/cargo.sh fmt --all -- --check || true and remains temporarily ignored per https://github.com/edera-dev/krata/issues/206.
- New jobs are added, each over an x86_64/aarch64 matrix with TARGET_ARCH set and the stable toolchain installed via rustup: full-build (./hack/build/cargo.sh build), full-test (./hack/build/cargo.sh test), full-clippy (./hack/build/cargo.sh clippy, with rustup component add clippy), and zone-initrd (./hack/initrd/build.sh, after rustup target add for the ${{ matrix.arch }}-unknown-linux-gnu and -musl targets).
- A kratactl-build job is added over a platform matrix (linux x86_64/aarch64 on ubuntu-latest, darwin x86_64/aarch64 on macos-14, freebsd x86_64 on ubuntu-latest, windows x86_64 on windows-latest). It configures git line endings on windows, adds the ${{ matrix.platform.arch }}-apple-darwin target and sets up homebrew on darwin, installs platform dependencies via ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh, and runs ./hack/build/cargo.sh build --bin kratactl.
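To reproduce the reworked CI toolchain setup locally, the equivalent shell commands are the ones from the new run blocks; the sketch below assumes an x86_64 host and combines the components and targets used by the lint and zone-initrd jobs.

```sh
# Install and select the stable toolchain the way the reworked jobs do.
rustup update --no-self-update stable
rustup default stable

# Components used by the rustfmt and full-clippy jobs.
rustup component add rustfmt clippy

# Targets added by the zone-initrd job (x86_64 shown as an example).
rustup target add x86_64-unknown-linux-gnu x86_64-unknown-linux-musl
```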
.github/workflows/client.yml (vendored): deleted (47 lines, @@ -1,47 +0,0 @@)

The standalone client workflow is removed. It ran on pull requests and merge groups against main and built kratactl across a platform matrix (linux x86_64/aarch64, darwin x86_64/aarch64 on macos-14, freebsd x86_64, windows x86_64) using harden-runner v2.8.1, the dtolnay/rust-toolchain action, homebrew setup on darwin, and ./hack/build/cargo.sh build --bin kratactl. That coverage now lives in the kratactl-build job added to check.yml.
.github/workflows/nightly.yml (vendored): 122 lines changed

- The workflow-level packages: write and id-token: write permissions are dropped (only contents: read remains at the top); the oci-build job now declares its own contents: read, id-token: write, and packages: write permissions.
- Jobs are renamed: server becomes full-build (named "nightly full build linux-${{ matrix.arch }}" and gaining CI_NEEDS_FPM: "1"), client becomes kratactl-build, and oci becomes oci-build. Every step now has an explicit name.
- step-security/harden-runner is bumped to v2.9.0, docker/build-push-action is bumped from v6.3.0 to v6.4.1 (1ca370b3a9802c92e886402e0dd88098a2533b12), and the dtolnay/rust-toolchain action is replaced by rustup commands (including rustup target add for the ${{ matrix.arch }}-unknown-linux-gnu and -musl targets in full-build, and the ${{ matrix.platform.arch }}-apple-darwin target on darwin in kratactl-build).
- full-build still builds and uploads the systemd bundle (./hack/dist/bundle.sh), deb (./hack/dist/deb.sh), and apk (./hack/dist/apk.sh) artifacts, but the KRATA_KERNEL_BUILD_SKIP: "1" environment variable on the apk step and the OS image steps (./hack/os/build.sh plus the krata-os-${{ matrix.arch }} qcow2 artifact upload) are removed.
- In oci-build the component list swaps krata-guest-init for krata-zone and the job is renamed "nightly oci build ${{ matrix.component }}"; images are still pushed to ghcr.io/edera-dev/${{ matrix.component }}:nightly and signed with cosign.
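The signing used by the nightly and release OCI jobs reduces to a single keyless cosign invocation. In the sketch below the command and environment variable names are the ones from the workflow; the image name uses kratad as an example component and the digest is a placeholder standing in for the docker/build-push-action step output.

```sh
# Keyless signing of the pushed multi-arch image by digest.
export COSIGN_EXPERIMENTAL="true"
TAGS="ghcr.io/edera-dev/kratad:nightly"
DIGEST="sha256:<digest-from-push-step>"   # placeholder for steps.push.outputs.digest
cosign sign --yes "${TAGS}@${DIGEST}"
```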
.github/workflows/os.yml (vendored): deleted (37 lines, @@ -1,37 +0,0 @@)

The os workflow is removed. It ran on pull requests and merge groups against main, built the OS image over an x86_64/aarch64 matrix with ./hack/os/build.sh, and uploaded target/os/krata-${{ matrix.arch }}.qcow2 as the krata-os-${{ matrix.arch }} artifact.
.github/workflows/release-assets.yml (vendored, new file): 166 lines added (@@ -0,0 +1,166 @@)

A new workflow that runs when a release is published. It sets the usual cargo environment (CARGO_INCREMENTAL: 0, CARGO_NET_GIT_FETCH_WITH_CLI: true, CARGO_NET_RETRY: 10, CARGO_TERM_COLOR: always, RUST_BACKTRACE: 1, RUSTUP_MAX_RETRIES: 10) and defines three jobs, all using harden-runner v2.9.0, a recursive checkout, and a rustup-installed stable toolchain:

- services (x86_64/aarch64 matrix, CI_NEEDS_FPM: "1", contents: write): adds the ${{ matrix.arch }}-unknown-linux-gnu and -musl targets, builds the systemd bundle (./hack/dist/bundle.sh), deb (./hack/dist/deb.sh), and apk (./hack/dist/apk.sh) packages, assembles each with ./hack/ci/assemble-release-assets.sh (bundle-systemd, debian, alpine), and uploads them with ./hack/ci/upload-release-assets.sh using GITHUB_TOKEN.
- kratactl (the six-platform matrix used elsewhere, 60-minute timeout, contents: write): builds kratactl with ./hack/build/cargo.sh build --release --bin kratactl, assembles the executable (kratactl, or kratactl.exe on windows) with assemble-release-assets.sh, and uploads the release artifacts.
- oci (components kratactl, kratad, kratanet, krata-zone; contents: read, id-token: write, packages: write): installs cosign and docker buildx, logs in to ghcr.io, captures the version with ./hack/dist/version.sh, builds and pushes ghcr.io/edera-dev/${{ matrix.component }}:${{ steps.version.outputs.KRATA_VERSION }} for linux/amd64 and linux/aarch64 with docker/build-push-action v6.4.1, and signs the pushed digest with cosign sign --yes.
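Condensed to shell, the services job's per-architecture flow looks roughly like the sketch below; the tag and architecture values are illustrative placeholders, and the scripts are the ones the workflow invokes.

```sh
# Build, assemble, and upload release assets for one architecture.
TAG="v0.0.13"       # illustrative release tag
ARCH="x86_64"       # illustrative matrix arch
export TARGET_ARCH="$ARCH"

./hack/dist/bundle.sh
./hack/ci/assemble-release-assets.sh bundle-systemd "$TAG" "$ARCH" \
    "target/dist/bundle-systemd-$ARCH.tgz"

./hack/dist/deb.sh
./hack/ci/assemble-release-assets.sh debian "$TAG" "$ARCH" target/dist/*.deb

./hack/dist/apk.sh
./hack/ci/assemble-release-assets.sh alpine "$TAG" "$ARCH" target/dist/*_"${ARCH}".apk

# Requires GITHUB_TOKEN in the environment, as in the workflow.
./hack/ci/upload-release-assets.sh "$TAG"
```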
.github/workflows/release-binaries.yml (vendored): deleted (134 lines, @@ -1,134 +0,0 @@)

The release-binaries workflow is removed in favor of the new release-assets.yml. It ran on published releases with workflow-wide contents/packages/id-token write permissions and three jobs: server (systemd bundle, deb, apk, and OS qcow2 builds assembled and uploaded as release assets), client (release builds of kratactl across the six-platform matrix), and oci (kratactl, kratad, kratanet, and krata-guest-init images pushed to ghcr.io tagged with the version from ./hack/dist/version.sh and signed with cosign). It used harden-runner v2.8.1, the dtolnay/rust-toolchain action, and docker/build-push-action v6.3.0.
.github/workflows/release-plz.yml (vendored): 23 lines changed

- The workflow-level pull-requests: write and contents: write permissions move into the release-plz job itself.
- step-security/harden-runner is bumped to v2.9.0 and the steps are named (harden runner, generate cultivator token, checkout repository, install stable rust toolchain, install linux dependencies).
- The dtolnay/rust-toolchain action is replaced by rustup update --no-self-update stable and rustup default stable; the MarcoIeni/release-plz-action step (v0.5.62) is unchanged.
.github/workflows/server.yml (vendored): deleted (94 lines, @@ -1,94 +0,0 @@)

The server workflow is removed. It ran on pull requests and merge groups against main with four x86_64/aarch64 matrix jobs: build (./hack/build/cargo.sh build), test (./hack/build/cargo.sh test), clippy (./hack/build/cargo.sh clippy), and initrd (./hack/initrd/build.sh with the gnu and musl targets). These are superseded by the full-build, full-test, full-clippy, and zone-initrd jobs added to check.yml.
CHANGELOG.md: 11 lines added (@@ -6,6 +6,17 @@)

A 0.0.13 section is inserted between [Unreleased] and [0.0.12]:

+## [0.0.13](https://github.com/edera-dev/krata/compare/v0.0.12...v0.0.13) - 2024-07-19
+
+### Added
+- *(kratactl)* rework cli to use subcommands ([#268](https://github.com/edera-dev/krata/pull/268))
+- *(krata)* rename guest to zone ([#266](https://github.com/edera-dev/krata/pull/266))
+
+### Other
+- *(deps)* upgrade dependencies, fix hyper io traits issue ([#252](https://github.com/edera-dev/krata/pull/252))
+- update Cargo.lock dependencies
+- update Cargo.toml dependencies
Cargo.lock (generated): 471 lines changed (diff suppressed because it is too large)
Cargo.toml: 37 lines changed

- Workspace members: crates/guest is renamed to crates/zone.
- [workspace.package] version: 0.0.12 -> 0.0.13.
- New workspace dependencies: hyper = "1.4.1", hyper-util = "0.1.6", pin-project-lite = "0.2.14".
- Version bumps: backhand 0.15.0 -> 0.18.0, bytes 1.5.0 -> 1.6.1, etherparse 0.14.3 -> 0.15.0, prost 0.12.6 -> 0.13.1, prost-build 0.12.6 -> 0.13.1, prost-reflect-build 0.13.0 -> 0.14.0, prost-types 0.12.6 -> 0.13.1, prost-reflect 0.13.1 -> 0.14.0, ratatui 0.26.3 -> 0.27.0, termtree 0.4.1 -> 0.5.0, toml 0.8.14 -> 0.8.15, tonic-build 0.11.0 -> 0.12.1, tonic 0.11.0 -> 0.12.1, udp-stream 0.0.11 -> 0.0.12, tokio 1.38.0 -> 1.38.1.
57
DEV.md
57
DEV.md
@ -4,25 +4,25 @@
|
|||||||
|
|
||||||
krata is composed of four major executables:
|
krata is composed of four major executables:
|
||||||
|
|
||||||
| Executable | Runs On | User Interaction | Dev Runner | Code Path |
|
| Executable | Runs On | User Interaction | Dev Runner | Code Path |
|
||||||
| ---------- | ------- | ---------------- | ------------------------ | ----------------- |
|
|------------|---------|------------------|--------------------------|----------------|
|
||||||
| kratad | host | backend daemon | ./hack/debug/kratad.sh | crates/daemon |
|
| kratad | host | backend daemon | ./hack/debug/kratad.sh | crates/daemon |
|
||||||
| kratanet | host | backend daemon | ./hack/debug/kratanet.sh | crates/network |
|
| kratanet | host | backend daemon | ./hack/debug/kratanet.sh | crates/network |
|
||||||
| kratactl | host | CLI tool | ./hack/debug/kratactl.sh | crates/ctl |
|
| kratactl | host | CLI tool | ./hack/debug/kratactl.sh | crates/ctl |
|
||||||
| krataguest | guest | none, guest init | N/A | crates/guest |
|
| kratazone | zone | none, zone init | N/A | crates/zone |
|
||||||
|
|
||||||
You will find the code to each executable available in the bin/ and src/ directories inside
|
You will find the code to each executable available in the bin/ and src/ directories inside
|
||||||
it's corresponding code path from the above table.
|
it's corresponding code path from the above table.
|
||||||
|
|
||||||
## Environment
|
## Environment
|
||||||
|
|
||||||
| Component | Specification | Notes |
|
| Component | Specification | Notes |
|
||||||
| ------------- | ------------- | --------------------------------------------------------------------------------- |
|
|--------------|---------------|----------------------------------------------------------------------------------|
|
||||||
| Architecture | x86_64 | aarch64 support is still in development |
|
| Architecture | x86_64 | aarch64 support is still in development |
|
||||||
| Memory | At least 6GB | dom0 will need to be configured with lower memory limit to give krata guests room |
|
| Memory | At least 6GB | dom0 will need to be configured with lower memory limit to give krata zones room |
|
||||||
| Xen | 4.17 | Temporary due to hardcoded interface version constants |
|
| Xen | 4.17+ | |
|
||||||
| Debian | stable / sid | Debian is recommended due to the ease of Xen setup |
|
| Debian | stable / sid | Debian is recommended due to the ease of Xen setup |
|
||||||
| rustup | any | Install Rustup from https://rustup.rs |
|
| rustup | any | Install Rustup from https://rustup.rs |
|
||||||
|
|
||||||
## Setup Guide
|
## Setup Guide
|
||||||
|
|
||||||
@ -31,8 +31,7 @@ it's corresponding code path from the above table.
|
|||||||
2. Install required packages:
|
2. Install required packages:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ apt install git xen-system-amd64 build-essential \
|
$ apt install git xen-system-amd64 build-essential musl-tools \
|
||||||
libclang-dev musl-tools flex bison libelf-dev libssl-dev bc \
|
|
||||||
protobuf-compiler libprotobuf-dev squashfs-tools erofs-utils
|
protobuf-compiler libprotobuf-dev squashfs-tools erofs-utils
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -45,10 +44,10 @@ $ rustup target add x86_64-unknown-linux-gnu
|
|||||||
$ rustup target add x86_64-unknown-linux-musl
|
$ rustup target add x86_64-unknown-linux-musl
|
||||||
```
|
```
|
||||||
|
|
||||||
4. Configure `/etc/default/grub.d/xen.cfg` to give krata guests some room:
|
4. Configure `/etc/default/grub.d/xen.cfg` to give krata zones some room:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
# Configure dom0_mem to be 4GB, but leave the rest of the RAM for krata guests.
|
# Configure dom0_mem to be 4GB, but leave the rest of the RAM for krata zones.
|
||||||
GRUB_CMDLINE_XEN_DEFAULT="dom0_mem=4G,max:4G"
|
GRUB_CMDLINE_XEN_DEFAULT="dom0_mem=4G,max:4G"
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -64,36 +63,36 @@ $ git clone https://github.com/edera-dev/krata.git krata
|
|||||||
$ cd krata
|
$ cd krata
|
||||||
```
|
```
|
||||||
|
|
||||||
-6. Fetch the guest kernel image:
+6. Fetch the zone kernel image:

```sh
$ ./hack/kernel/fetch.sh -u
```

-7. Copy the guest kernel artifacts to `/var/lib/krata/guest/kernel` so it is automatically detected by kratad:
+7. Copy the zone kernel artifacts to `/var/lib/krata/zone/kernel` so it is automatically detected by kratad:

```sh
-$ mkdir -p /var/lib/krata/guest
-$ cp target/kernel/kernel-x86_64 /var/lib/krata/guest/kernel
-$ cp target/kernel/addons-x86_64.squashfs /var/lib/krata/guest/addons.squashfs
+$ mkdir -p /var/lib/krata/zone
+$ cp target/kernel/kernel-x86_64 /var/lib/krata/zone/kernel
+$ cp target/kernel/addons-x86_64.squashfs /var/lib/krata/zone/addons.squashfs
```

8. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
9. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
-10. Run `kratactl` to launch a guest:
+10. Run `kratactl` to launch a zone:

```sh
-$ ./hack/debug/kratactl.sh launch --attach alpine:latest
+$ ./hack/debug/kratactl.sh zone launch --attach alpine:latest
```

-To detach from the guest console, use `Ctrl + ]` on your keyboard.
+To detach from the zone console, use `Ctrl + ]` on your keyboard.

-To list the running guests, run:
+To list the running zones, run:
```sh
-$ ./hack/debug/kratactl.sh list
+$ ./hack/debug/kratactl.sh zone list
```

-To destroy a running guest, copy it's UUID from either the launch command or the guest list and run:
+To destroy a running zone, copy it's UUID from either the launch command or the zone list and run:
```sh
-$ ./hack/debug/kratactl.sh destroy GUEST_UUID
+$ ./hack/debug/kratactl.sh zone destroy ZONE_UUID
```
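Taken together, the renamed commands support a debug loop like the sketch below. The `zone exec` and `zone logs` invocations are inferred from the `ZoneExecCommand` and `ZoneLogsCommand` types introduced later in this change set rather than from the setup guide, so treat the exact spelling as an assumption:

```sh
# Sketch of a possible debug workflow with the new `zone` subcommand tree.
# `zone exec` and `zone logs` are assumed from this change set, not documented above.
$ ./hack/debug/kratactl.sh zone launch --name example -W alpine:latest
$ ./hack/debug/kratactl.sh zone list
$ ./hack/debug/kratactl.sh zone exec example -- uname -a
$ ./hack/debug/kratactl.sh zone logs example
$ ./hack/debug/kratactl.sh zone destroy -W example
```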
FAQ.md (2 lines changed)

@@ -12,4 +12,4 @@ Xen is a very interesting technology, and Edera believes that type-1 hypervisors

## Why not utilize pvcalls to provide access to the host network?

-pvcalls is extremely interesting, and although it is certainly possible to utilize pvcalls to get the job done, we chose to utilize userspace networking technology in order to enhance security. Our goal is to drop the use of all xen networking backend drivers within the kernel and have the guest talk directly to a userspace daemon, bypassing the vif (xen-netback) driver. Currently, in order to develop the networking layer, we utilize xen-netback and then use raw sockets to provide the userspace networking layer on the host.
+pvcalls is fascinating, and although it is certainly possible to utilize pvcalls to get the job done, we chose to utilize userspace networking technology in order to enhance security. Our goal is to drop the use of all xen networking backend drivers within the kernel and have the guest talk directly to a userspace daemon, bypassing the vif (xen-netback) driver. Currently, in order to develop the networking layer, we utilize xen-netback and then use raw sockets to provide the userspace networking layer on the host.
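The paragraph above only describes the host-side raw-socket approach in prose. Below is a minimal sketch of what reading frames from a vif backend interface with an AF_PACKET socket can look like; the interface name `vif1.0`, the use of the `libc` crate, and the overall shape are illustrative assumptions, not krata's actual networking code.

```rust
// Illustrative sketch only: read raw ethernet frames from a Xen vif backend
// interface on the host, the way a userspace networking daemon could.
// Assumptions: Linux, root privileges, the `libc` crate, and the hypothetical
// interface name "vif1.0". This is not krata's implementation.
use std::ffi::CString;
use std::io;
use std::os::fd::{AsRawFd, FromRawFd, OwnedFd};

fn open_raw_socket(ifname: &str) -> io::Result<OwnedFd> {
    unsafe {
        // ETH_P_ALL must be supplied in network byte order.
        let proto = (libc::ETH_P_ALL as u16).to_be();
        let fd = libc::socket(libc::AF_PACKET, libc::SOCK_RAW, proto as i32);
        if fd < 0 {
            return Err(io::Error::last_os_error());
        }
        // Bind the packet socket to the backend interface of the zone.
        let name = CString::new(ifname).expect("interface name contains NUL");
        let ifindex = libc::if_nametoindex(name.as_ptr());
        if ifindex == 0 {
            libc::close(fd);
            return Err(io::Error::last_os_error());
        }
        let mut addr: libc::sockaddr_ll = std::mem::zeroed();
        addr.sll_family = libc::AF_PACKET as u16;
        addr.sll_protocol = proto;
        addr.sll_ifindex = ifindex as i32;
        let rc = libc::bind(
            fd,
            &addr as *const libc::sockaddr_ll as *const libc::sockaddr,
            std::mem::size_of::<libc::sockaddr_ll>() as libc::socklen_t,
        );
        if rc < 0 {
            libc::close(fd);
            return Err(io::Error::last_os_error());
        }
        Ok(OwnedFd::from_raw_fd(fd))
    }
}

fn main() -> io::Result<()> {
    let fd = open_raw_socket("vif1.0")?;
    let mut frame = [0u8; 2048];
    loop {
        let n = unsafe {
            libc::read(fd.as_raw_fd(), frame.as_mut_ptr() as *mut libc::c_void, frame.len())
        };
        if n < 0 {
            return Err(io::Error::last_os_error());
        }
        // A real daemon would feed the frame into its userspace network stack here.
        println!("read {} byte frame from vif", n);
    }
}
```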
README.md (12 lines changed)

@@ -2,6 +2,10 @@

An isolation engine for securing compute workloads.

+```bash
+$ kratactl zone launch -a alpine:latest
+```
+


[](https://github.com/edera-dev/krata/actions/workflows/check.yml)
@@ -22,7 +26,7 @@ krata utilizes the core of the Xen hypervisor with a fully memory-safe Rust cont

## Hardware Support

| Architecture | Completion Level | Hardware Virtualization |
-| ------------ | ---------------- | ------------------------------- |
+|--------------|------------------|-------------------------|
| x86_64 | 100% Completed | None, Intel VT-x, AMD-V |
| aarch64 | 10% Completed | AArch64 virtualization |

@@ -16,7 +16,7 @@ oci-spec = { workspace = true }
scopeguard = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
-krata-oci = { path = "../oci", version = "^0.0.12" }
+krata-oci = { path = "../oci", version = "^0.0.13" }
krata-tokio-tar = { workspace = true }
uuid = { workspace = true }

@@ -39,7 +39,7 @@ async fn main() -> Result<()> {
    );

    let image = ImageName::parse(&args().nth(1).unwrap())?;
-    let mut cache_dir = std::env::temp_dir().clone();
+    let mut cache_dir = env::temp_dir().clone();
    cache_dir.push(format!("krata-cache-{}", Uuid::new_v4()));
    fs::create_dir_all(&cache_dir).await?;

@@ -20,7 +20,7 @@ env_logger = { workspace = true }
fancy-duration = { workspace = true }
human_bytes = { workspace = true }
indicatif = { workspace = true }
-krata = { path = "../krata", version = "^0.0.12" }
+krata = { path = "../krata", version = "^0.0.13" }
log = { workspace = true }
prost-reflect = { workspace = true, features = ["serde"] }
prost-types = { workspace = true }

@@ -1,46 +0,0 @@
-use anyhow::Result;
-use clap::Parser;
-use krata::v1::control::{control_service_client::ControlServiceClient, HostCpuTopologyRequest};
-
-use tonic::{transport::Channel, Request};
-
-fn class_to_str(input: i32) -> String {
-    match input {
-        0 => "Standard".to_string(),
-        1 => "Performance".to_string(),
-        2 => "Efficiency".to_string(),
-        _ => "???".to_string(),
-    }
-}
-
-#[derive(Parser)]
-#[command(about = "Display information about a host's CPU topology")]
-pub struct CpuTopologyCommand {}
-
-impl CpuTopologyCommand {
-    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
-        println!(
-            "{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
-            "CPUID", "Node", "Socket", "Core", "Thread", "Class"
-        );
-
-        let response = client
-            .get_host_cpu_topology(Request::new(HostCpuTopologyRequest {}))
-            .await?
-            .into_inner();
-
-        for (i, cpu) in response.cpus.iter().enumerate() {
-            println!(
-                "{0:<10} {1:<10} {2:<10} {3:<10} {4:<10} {5:<10}",
-                i,
-                cpu.node,
-                cpu.socket,
-                cpu.core,
-                cpu.thread,
-                class_to_str(cpu.class)
-            );
-        }
-
-        Ok(())
-    }
-}
@@ -12,7 +12,7 @@ use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, proto2kv};

#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
-enum ListDevicesFormat {
+enum DeviceListFormat {
    Table,
    Json,
    JsonPretty,
@@ -24,12 +24,12 @@ enum ListDevicesFormat {

#[derive(Parser)]
#[command(about = "List the devices on the isolation engine")]
-pub struct ListDevicesCommand {
+pub struct DeviceListCommand {
    #[arg(short, long, default_value = "table", help = "Output format")]
-    format: ListDevicesFormat,
+    format: DeviceListFormat,
}

-impl ListDevicesCommand {
+impl DeviceListCommand {
    pub async fn run(
        self,
        mut client: ControlServiceClient<Channel>,
@@ -44,26 +44,26 @@ impl ListDevicesCommand {
        devices.sort_by(|a, b| a.name.cmp(&b.name));

        match self.format {
-            ListDevicesFormat::Table => {
+            DeviceListFormat::Table => {
                self.print_devices_table(devices)?;
            }

-            ListDevicesFormat::Simple => {
+            DeviceListFormat::Simple => {
                for device in devices {
                    println!("{}\t{}\t{}", device.name, device.claimed, device.owner);
                }
            }

-            ListDevicesFormat::Json | ListDevicesFormat::JsonPretty | ListDevicesFormat::Yaml => {
+            DeviceListFormat::Json | DeviceListFormat::JsonPretty | DeviceListFormat::Yaml => {
                let mut values = Vec::new();
                for device in devices {
                    let message = proto2dynamic(device)?;
                    values.push(serde_json::to_value(message)?);
                }
                let value = Value::Array(values);
-                let encoded = if self.format == ListDevicesFormat::JsonPretty {
+                let encoded = if self.format == DeviceListFormat::JsonPretty {
                    serde_json::to_string_pretty(&value)?
-                } else if self.format == ListDevicesFormat::Yaml {
+                } else if self.format == DeviceListFormat::Yaml {
                    serde_yaml::to_string(&value)?
                } else {
                    serde_json::to_string(&value)?
@@ -71,14 +71,14 @@ impl ListDevicesCommand {
                println!("{}", encoded.trim());
            }

-            ListDevicesFormat::Jsonl => {
+            DeviceListFormat::Jsonl => {
                for device in devices {
                    let message = proto2dynamic(device)?;
                    println!("{}", serde_json::to_string(&message)?);
                }
            }

-            ListDevicesFormat::KeyValue => {
+            DeviceListFormat::KeyValue => {
                self.print_key_value(devices)?;
            }
        }
crates/ctl/src/cli/device/mod.rs (new file, 44 lines)

@@ -0,0 +1,44 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use tonic::transport::Channel;

use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;

use crate::cli::device::list::DeviceListCommand;

pub mod list;

#[derive(Parser)]
#[command(about = "Manage the devices on the isolation engine")]
pub struct DeviceCommand {
    #[command(subcommand)]
    subcommand: DeviceCommands,
}

impl DeviceCommand {
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        self.subcommand.run(client, events).await
    }
}

#[derive(Subcommand)]
pub enum DeviceCommands {
    List(DeviceListCommand),
}

impl DeviceCommands {
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        match self {
            DeviceCommands::List(list) => list.run(client, events).await,
        }
    }
}
crates/ctl/src/cli/host/cpu_topology.rs (new file, 60 lines)

@@ -0,0 +1,60 @@
use anyhow::Result;
use clap::{Parser, ValueEnum};
use comfy_table::presets::UTF8_FULL_CONDENSED;
use comfy_table::{Cell, Table};
use krata::v1::control::{
    control_service_client::ControlServiceClient, HostCpuTopologyClass, HostCpuTopologyRequest,
};

use tonic::{transport::Channel, Request};

fn class_to_str(input: HostCpuTopologyClass) -> String {
    match input {
        HostCpuTopologyClass::Standard => "Standard".to_string(),
        HostCpuTopologyClass::Performance => "Performance".to_string(),
        HostCpuTopologyClass::Efficiency => "Efficiency".to_string(),
    }
}

#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
enum HostCpuTopologyFormat {
    Table,
}

#[derive(Parser)]
#[command(about = "Display information about the host CPU topology")]
pub struct HostCpuTopologyCommand {
    #[arg(short, long, default_value = "table", help = "Output format")]
    format: HostCpuTopologyFormat,
}

impl HostCpuTopologyCommand {
    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
        let response = client
            .get_host_cpu_topology(Request::new(HostCpuTopologyRequest {}))
            .await?
            .into_inner();

        let mut table = Table::new();
        table.load_preset(UTF8_FULL_CONDENSED);
        table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
        table.set_header(vec!["id", "node", "socket", "core", "thread", "class"]);

        for (i, cpu) in response.cpus.iter().enumerate() {
            table.add_row(vec![
                Cell::new(i),
                Cell::new(cpu.node),
                Cell::new(cpu.socket),
                Cell::new(cpu.core),
                Cell::new(cpu.thread),
                Cell::new(class_to_str(cpu.class())),
            ]);
        }

        if !table.is_empty() {
            println!("{}", table);
        }

        Ok(())
    }
}
@@ -6,9 +6,9 @@ use tonic::{transport::Channel, Request};

#[derive(Parser)]
#[command(about = "Identify information about the host")]
-pub struct IdentifyHostCommand {}
+pub struct HostIdentifyCommand {}

-impl IdentifyHostCommand {
+impl HostIdentifyCommand {
    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
        let response = client
            .identify_host(Request::new(IdentifyHostRequest {}))
@@ -15,7 +15,7 @@ use tonic::transport::Channel;
use crate::format::{kv2line, proto2dynamic, value2kv};

#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
-enum IdmSnoopFormat {
+enum HostIdmSnoopFormat {
    Simple,
    Jsonl,
    KeyValue,
@@ -23,12 +23,12 @@ enum IdmSnoopFormat {

#[derive(Parser)]
#[command(about = "Snoop on the IDM bus")]
-pub struct IdmSnoopCommand {
+pub struct HostIdmSnoopCommand {
    #[arg(short, long, default_value = "simple", help = "Output format")]
-    format: IdmSnoopFormat,
+    format: HostIdmSnoopFormat,
}

-impl IdmSnoopCommand {
+impl HostIdmSnoopCommand {
    pub async fn run(
        self,
        mut client: ControlServiceClient<Channel>,
@@ -43,16 +43,16 @@ impl IdmSnoopCommand {
        };

        match self.format {
-            IdmSnoopFormat::Simple => {
+            HostIdmSnoopFormat::Simple => {
                self.print_simple(line)?;
            }

-            IdmSnoopFormat::Jsonl => {
+            HostIdmSnoopFormat::Jsonl => {
                let encoded = serde_json::to_string(&line)?;
                println!("{}", encoded.trim());
            }

-            IdmSnoopFormat::KeyValue => {
+            HostIdmSnoopFormat::KeyValue => {
                self.print_key_value(line)?;
            }
        }
crates/ctl/src/cli/host/mod.rs (new file, 54 lines)

@@ -0,0 +1,54 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use tonic::transport::Channel;

use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;

use crate::cli::host::cpu_topology::HostCpuTopologyCommand;
use crate::cli::host::identify::HostIdentifyCommand;
use crate::cli::host::idm_snoop::HostIdmSnoopCommand;

pub mod cpu_topology;
pub mod identify;
pub mod idm_snoop;

#[derive(Parser)]
#[command(about = "Manage the host of the isolation engine")]
pub struct HostCommand {
    #[command(subcommand)]
    subcommand: HostCommands,
}

impl HostCommand {
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        self.subcommand.run(client, events).await
    }
}

#[derive(Subcommand)]
pub enum HostCommands {
    CpuTopology(HostCpuTopologyCommand),
    Identify(HostIdentifyCommand),
    IdmSnoop(HostIdmSnoopCommand),
}

impl HostCommands {
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        match self {
            HostCommands::CpuTopology(cpu_topology) => cpu_topology.run(client).await,

            HostCommands::Identify(identify) => identify.run(client).await,

            HostCommands::IdmSnoop(snoop) => snoop.run(client, events).await,
        }
    }
}
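Since clap derives kebab-case names from the `HostCommands` variants, the commands that used to live at the top level are now reachable under `host`. The invocations below are an assumed illustration of the new layout, not output copied from this change:

```sh
# Assumed invocations of the nested host commands (kebab-cased variant names).
$ ./hack/debug/kratactl.sh host identify
$ ./hack/debug/kratactl.sh host cpu-topology
$ ./hack/debug/kratactl.sh host idm-snoop --format jsonl
```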
crates/ctl/src/cli/image/mod.rs (new file, 44 lines)

@@ -0,0 +1,44 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use tonic::transport::Channel;

use krata::events::EventStream;
use krata::v1::control::control_service_client::ControlServiceClient;

use crate::cli::image::pull::ImagePullCommand;

pub mod pull;

#[derive(Parser)]
#[command(about = "Manage the images on the isolation engine")]
pub struct ImageCommand {
    #[command(subcommand)]
    subcommand: ImageCommands,
}

impl ImageCommand {
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        events: EventStream,
    ) -> Result<()> {
        self.subcommand.run(client, events).await
    }
}

#[derive(Subcommand)]
pub enum ImageCommands {
    Pull(ImagePullCommand),
}

impl ImageCommands {
    pub async fn run(
        self,
        client: ControlServiceClient<Channel>,
        _events: EventStream,
    ) -> Result<()> {
        match self {
            ImageCommands::Pull(pull) => pull.run(client).await,
        }
    }
}
@@ -10,7 +10,7 @@ use tonic::transport::Channel;
use crate::pull::pull_interactive_progress;

#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
-pub enum PullImageFormat {
+pub enum ImagePullImageFormat {
    Squashfs,
    Erofs,
    Tar,
@@ -18,24 +18,24 @@ pub enum PullImageFormat {

#[derive(Parser)]
#[command(about = "Pull an image into the cache")]
-pub struct PullCommand {
+pub struct ImagePullCommand {
    #[arg(help = "Image name")]
    image: String,
    #[arg(short = 's', long, default_value = "squashfs", help = "Image format")]
-    image_format: PullImageFormat,
+    image_format: ImagePullImageFormat,
    #[arg(short = 'o', long, help = "Overwrite image cache")]
    overwrite_cache: bool,
}

-impl PullCommand {
+impl ImagePullCommand {
    pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
        let response = client
            .pull_image(PullImageRequest {
                image: self.image.clone(),
                format: match self.image_format {
-                    PullImageFormat::Squashfs => OciImageFormat::Squashfs.into(),
-                    PullImageFormat::Erofs => OciImageFormat::Erofs.into(),
-                    PullImageFormat::Tar => OciImageFormat::Tar.into(),
+                    ImagePullImageFormat::Squashfs => OciImageFormat::Squashfs.into(),
+                    ImagePullImageFormat::Erofs => OciImageFormat::Erofs.into(),
+                    ImagePullImageFormat::Tar => OciImageFormat::Tar.into(),
                },
                overwrite_cache: self.overwrite_cache,
            })
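With the rename, pre-pulling an image goes through the `image` subcommand while keeping the same flags (`-s` for the format, `-o` to overwrite the cache). The example below is an assumed invocation, not taken from the repository docs:

```sh
# Assumed example: pull an image into the cache in erofs format, overwriting any cached copy.
$ ./hack/debug/kratactl.sh image pull -s erofs -o alpine:latest
```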
@ -1,36 +1,21 @@
|
|||||||
pub mod attach;
|
pub mod device;
|
||||||
pub mod cpu_topology;
|
pub mod host;
|
||||||
pub mod destroy;
|
pub mod image;
|
||||||
pub mod exec;
|
pub mod zone;
|
||||||
pub mod identify_host;
|
|
||||||
pub mod idm_snoop;
|
|
||||||
pub mod launch;
|
|
||||||
pub mod list;
|
|
||||||
pub mod list_devices;
|
|
||||||
pub mod logs;
|
|
||||||
pub mod metrics;
|
|
||||||
pub mod pull;
|
|
||||||
pub mod resolve;
|
|
||||||
pub mod top;
|
|
||||||
pub mod watch;
|
|
||||||
|
|
||||||
|
use crate::cli::device::DeviceCommand;
|
||||||
|
use crate::cli::host::HostCommand;
|
||||||
|
use crate::cli::image::ImageCommand;
|
||||||
|
use crate::cli::zone::ZoneCommand;
|
||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use clap::{Parser, Subcommand};
|
use clap::Parser;
|
||||||
use krata::{
|
use krata::{
|
||||||
client::ControlClientProvider,
|
client::ControlClientProvider,
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::control::{control_service_client::ControlServiceClient, ResolveGuestRequest},
|
v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest},
|
||||||
};
|
};
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use self::{
|
|
||||||
attach::AttachCommand, cpu_topology::CpuTopologyCommand, destroy::DestroyCommand,
|
|
||||||
exec::ExecCommand, identify_host::IdentifyHostCommand, idm_snoop::IdmSnoopCommand,
|
|
||||||
launch::LaunchCommand, list::ListCommand, list_devices::ListDevicesCommand, logs::LogsCommand,
|
|
||||||
metrics::MetricsCommand, pull::PullCommand, resolve::ResolveCommand, top::TopCommand,
|
|
||||||
watch::WatchCommand,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(version, about = "Control the krata isolation engine")]
|
#[command(version, about = "Control the krata isolation engine")]
|
||||||
pub struct ControlCommand {
|
pub struct ControlCommand {
|
||||||
@ -43,26 +28,15 @@ pub struct ControlCommand {
|
|||||||
connection: String,
|
connection: String,
|
||||||
|
|
||||||
#[command(subcommand)]
|
#[command(subcommand)]
|
||||||
command: Commands,
|
command: ControlCommands,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Subcommand)]
|
#[derive(Parser)]
|
||||||
pub enum Commands {
|
pub enum ControlCommands {
|
||||||
Launch(LaunchCommand),
|
Zone(ZoneCommand),
|
||||||
Destroy(DestroyCommand),
|
Image(ImageCommand),
|
||||||
List(ListCommand),
|
Device(DeviceCommand),
|
||||||
ListDevices(ListDevicesCommand),
|
Host(HostCommand),
|
||||||
Attach(AttachCommand),
|
|
||||||
Pull(PullCommand),
|
|
||||||
Logs(LogsCommand),
|
|
||||||
Watch(WatchCommand),
|
|
||||||
Resolve(ResolveCommand),
|
|
||||||
Metrics(MetricsCommand),
|
|
||||||
IdmSnoop(IdmSnoopCommand),
|
|
||||||
Top(TopCommand),
|
|
||||||
IdentifyHost(IdentifyHostCommand),
|
|
||||||
Exec(ExecCommand),
|
|
||||||
CpuTopology(CpuTopologyCommand),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ControlCommand {
|
impl ControlCommand {
|
||||||
@ -71,84 +45,31 @@ impl ControlCommand {
|
|||||||
let events = EventStream::open(client.clone()).await?;
|
let events = EventStream::open(client.clone()).await?;
|
||||||
|
|
||||||
match self.command {
|
match self.command {
|
||||||
Commands::Launch(launch) => {
|
ControlCommands::Zone(zone) => zone.run(client, events).await,
|
||||||
launch.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Destroy(destroy) => {
|
ControlCommands::Image(image) => image.run(client, events).await,
|
||||||
destroy.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Attach(attach) => {
|
ControlCommands::Device(device) => device.run(client, events).await,
|
||||||
attach.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Logs(logs) => {
|
ControlCommands::Host(snoop) => snoop.run(client, events).await,
|
||||||
logs.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::List(list) => {
|
|
||||||
list.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Watch(watch) => {
|
|
||||||
watch.run(events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Resolve(resolve) => {
|
|
||||||
resolve.run(client).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Metrics(metrics) => {
|
|
||||||
metrics.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::IdmSnoop(snoop) => {
|
|
||||||
snoop.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Top(top) => {
|
|
||||||
top.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Pull(pull) => {
|
|
||||||
pull.run(client).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::IdentifyHost(identify) => {
|
|
||||||
identify.run(client).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::Exec(exec) => {
|
|
||||||
exec.run(client).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::ListDevices(list) => {
|
|
||||||
list.run(client, events).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Commands::CpuTopology(cpu_topology) => {
|
|
||||||
cpu_topology.run(client).await?;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn resolve_guest(
|
pub async fn resolve_zone(
|
||||||
client: &mut ControlServiceClient<Channel>,
|
client: &mut ControlServiceClient<Channel>,
|
||||||
name: &str,
|
name: &str,
|
||||||
) -> Result<String> {
|
) -> Result<String> {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_guest(Request::new(ResolveGuestRequest {
|
.resolve_zone(Request::new(ResolveZoneRequest {
|
||||||
name: name.to_string(),
|
name: name.to_string(),
|
||||||
}))
|
}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
|
|
||||||
if let Some(guest) = reply.guest {
|
if let Some(zone) = reply.zone {
|
||||||
Ok(guest.id)
|
Ok(zone.id)
|
||||||
} else {
|
} else {
|
||||||
Err(anyhow!("unable to resolve guest '{}'", name))
|
Err(anyhow!("unable to resolve zone '{}'", name))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -7,27 +7,27 @@ use tonic::transport::Channel;
|
|||||||
|
|
||||||
use crate::console::StdioConsoleStream;
|
use crate::console::StdioConsoleStream;
|
||||||
|
|
||||||
use super::resolve_guest;
|
use crate::cli::resolve_zone;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Attach to the guest console")]
|
#[command(about = "Attach to the zone console")]
|
||||||
pub struct AttachCommand {
|
pub struct ZoneAttachCommand {
|
||||||
#[arg(help = "Guest to attach to, either the name or the uuid")]
|
#[arg(help = "Zone to attach to, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AttachCommand {
|
impl ZoneAttachCommand {
|
||||||
pub async fn run(
|
pub async fn run(
|
||||||
self,
|
self,
|
||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
events: EventStream,
|
events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let input = StdioConsoleStream::stdin_stream(guest_id.clone()).await;
|
let input = StdioConsoleStream::stdin_stream(zone_id.clone()).await;
|
||||||
let output = client.console_data(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
||||||
let exit_hook_task = StdioConsoleStream::guest_exit_hook(guest_id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
|
||||||
let code = select! {
|
let code = select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
x??;
|
x??;
|
@ -3,10 +3,10 @@ use clap::Parser;
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::GuestStatus,
|
common::ZoneStatus,
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
DestroyGuestRequest,
|
DestroyZoneRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -14,67 +14,67 @@ use krata::{
|
|||||||
use log::error;
|
use log::error;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::cli::resolve_guest;
|
use crate::cli::resolve_zone;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Destroy a guest")]
|
#[command(about = "Destroy a zone")]
|
||||||
pub struct DestroyCommand {
|
pub struct ZoneDestroyCommand {
|
||||||
#[arg(
|
#[arg(
|
||||||
short = 'W',
|
short = 'W',
|
||||||
long,
|
long,
|
||||||
help = "Wait for the destruction of the guest to complete"
|
help = "Wait for the destruction of the zone to complete"
|
||||||
)]
|
)]
|
||||||
wait: bool,
|
wait: bool,
|
||||||
#[arg(help = "Guest to destroy, either the name or the uuid")]
|
#[arg(help = "Zone to destroy, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DestroyCommand {
|
impl ZoneDestroyCommand {
|
||||||
pub async fn run(
|
pub async fn run(
|
||||||
self,
|
self,
|
||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
events: EventStream,
|
events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let _ = client
|
let _ = client
|
||||||
.destroy_guest(Request::new(DestroyGuestRequest {
|
.destroy_zone(Request::new(DestroyZoneRequest {
|
||||||
guest_id: guest_id.clone(),
|
zone_id: zone_id.clone(),
|
||||||
}))
|
}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
if self.wait {
|
if self.wait {
|
||||||
wait_guest_destroyed(&guest_id, events).await?;
|
wait_zone_destroyed(&zone_id, events).await?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_guest_destroyed(id: &str, events: EventStream) -> Result<()> {
|
async fn wait_zone_destroyed(id: &str, events: EventStream) -> Result<()> {
|
||||||
let mut stream = events.subscribe();
|
let mut stream = events.subscribe();
|
||||||
while let Ok(event) = stream.recv().await {
|
while let Ok(event) = stream.recv().await {
|
||||||
let Event::GuestChanged(changed) = event;
|
let Event::ZoneChanged(changed) = event;
|
||||||
let Some(guest) = changed.guest else {
|
let Some(zone) = changed.zone else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if guest.id != id {
|
if zone.id != id {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(state) = guest.state else {
|
let Some(state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ref error) = state.error_info {
|
if let Some(ref error) = state.error_info {
|
||||||
if state.status() == GuestStatus::Failed {
|
if state.status() == ZoneStatus::Failed {
|
||||||
error!("destroy failed: {}", error.message);
|
error!("destroy failed: {}", error.message);
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
} else {
|
} else {
|
||||||
error!("guest error: {}", error.message);
|
error!("zone error: {}", error.message);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == GuestStatus::Destroyed {
|
if state.status() == ZoneStatus::Destroyed {
|
||||||
std::process::exit(0);
|
std::process::exit(0);
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -4,42 +4,42 @@ use anyhow::Result;
|
|||||||
|
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use krata::v1::{
|
use krata::v1::{
|
||||||
common::{GuestTaskSpec, GuestTaskSpecEnvVar},
|
common::{ZoneTaskSpec, ZoneTaskSpecEnvVar},
|
||||||
control::{control_service_client::ControlServiceClient, ExecGuestRequest},
|
control::{control_service_client::ControlServiceClient, ExecZoneRequest},
|
||||||
};
|
};
|
||||||
|
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::console::StdioConsoleStream;
|
use crate::console::StdioConsoleStream;
|
||||||
|
|
||||||
use super::resolve_guest;
|
use crate::cli::resolve_zone;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Execute a command inside the guest")]
|
#[command(about = "Execute a command inside the zone")]
|
||||||
pub struct ExecCommand {
|
pub struct ZoneExecCommand {
|
||||||
#[arg[short, long, help = "Environment variables"]]
|
#[arg[short, long, help = "Environment variables"]]
|
||||||
env: Option<Vec<String>>,
|
env: Option<Vec<String>>,
|
||||||
#[arg(short = 'w', long, help = "Working directory")]
|
#[arg(short = 'w', long, help = "Working directory")]
|
||||||
working_directory: Option<String>,
|
working_directory: Option<String>,
|
||||||
#[arg(help = "Guest to exec inside, either the name or the uuid")]
|
#[arg(help = "Zone to exec inside, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
#[arg(
|
#[arg(
|
||||||
allow_hyphen_values = true,
|
allow_hyphen_values = true,
|
||||||
trailing_var_arg = true,
|
trailing_var_arg = true,
|
||||||
help = "Command to run inside the guest"
|
help = "Command to run inside the zone"
|
||||||
)]
|
)]
|
||||||
command: Vec<String>,
|
command: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ExecCommand {
|
impl ZoneExecCommand {
|
||||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let initial = ExecGuestRequest {
|
let initial = ExecZoneRequest {
|
||||||
guest_id,
|
zone_id,
|
||||||
task: Some(GuestTaskSpec {
|
task: Some(ZoneTaskSpec {
|
||||||
environment: env_map(&self.env.unwrap_or_default())
|
environment: env_map(&self.env.unwrap_or_default())
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(key, value)| GuestTaskSpecEnvVar {
|
.map(|(key, value)| ZoneTaskSpecEnvVar {
|
||||||
key: key.clone(),
|
key: key.clone(),
|
||||||
value: value.clone(),
|
value: value.clone(),
|
||||||
})
|
})
|
||||||
@ -52,7 +52,7 @@ impl ExecCommand {
|
|||||||
|
|
||||||
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
|
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
|
||||||
|
|
||||||
let response = client.exec_guest(Request::new(stream)).await?.into_inner();
|
let response = client.exec_zone(Request::new(stream)).await?.into_inner();
|
||||||
|
|
||||||
let code = StdioConsoleStream::exec_output(response).await?;
|
let code = StdioConsoleStream::exec_output(response).await?;
|
||||||
std::process::exit(code);
|
std::process::exit(code);
|
@ -6,12 +6,12 @@ use krata::{
|
|||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{
|
common::{
|
||||||
guest_image_spec::Image, GuestImageSpec, GuestOciImageSpec, GuestSpec, GuestSpecDevice,
|
zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneOciImageSpec, ZoneSpec,
|
||||||
GuestStatus, GuestTaskSpec, GuestTaskSpecEnvVar, OciImageFormat,
|
ZoneSpecDevice, ZoneStatus, ZoneTaskSpec, ZoneTaskSpecEnvVar,
|
||||||
},
|
},
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
CreateGuestRequest, PullImageRequest,
|
CreateZoneRequest, PullImageRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -28,61 +28,56 @@ pub enum LaunchImageFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Launch a new guest")]
|
#[command(about = "Launch a new zone")]
|
||||||
pub struct LaunchCommand {
|
pub struct ZoneLaunchCommand {
|
||||||
#[arg(long, default_value = "squashfs", help = "Image format")]
|
#[arg(long, default_value = "squashfs", help = "Image format")]
|
||||||
image_format: LaunchImageFormat,
|
image_format: LaunchImageFormat,
|
||||||
#[arg(long, help = "Overwrite image cache on pull")]
|
#[arg(long, help = "Overwrite image cache on pull")]
|
||||||
pull_overwrite_cache: bool,
|
pull_overwrite_cache: bool,
|
||||||
#[arg(short, long, help = "Name of the guest")]
|
#[arg(short, long, help = "Name of the zone")]
|
||||||
name: Option<String>,
|
name: Option<String>,
|
||||||
#[arg(
|
#[arg(short, long, default_value_t = 1, help = "vCPUs available to the zone")]
|
||||||
short,
|
|
||||||
long,
|
|
||||||
default_value_t = 1,
|
|
||||||
help = "vCPUs available to the guest"
|
|
||||||
)]
|
|
||||||
cpus: u32,
|
cpus: u32,
|
||||||
#[arg(
|
#[arg(
|
||||||
short,
|
short,
|
||||||
long,
|
long,
|
||||||
default_value_t = 512,
|
default_value_t = 512,
|
||||||
help = "Memory available to the guest, in megabytes"
|
help = "Memory available to the zone, in megabytes"
|
||||||
)]
|
)]
|
||||||
mem: u64,
|
mem: u64,
|
||||||
#[arg[short = 'D', long = "device", help = "Devices to request for the guest"]]
|
#[arg[short = 'D', long = "device", help = "Devices to request for the zone"]]
|
||||||
device: Vec<String>,
|
device: Vec<String>,
|
||||||
#[arg[short, long, help = "Environment variables set in the guest"]]
|
#[arg[short, long, help = "Environment variables set in the zone"]]
|
||||||
env: Option<Vec<String>>,
|
env: Option<Vec<String>>,
|
||||||
#[arg(
|
#[arg(
|
||||||
short,
|
short,
|
||||||
long,
|
long,
|
||||||
help = "Attach to the guest after guest starts, implies --wait"
|
help = "Attach to the zone after zone starts, implies --wait"
|
||||||
)]
|
)]
|
||||||
attach: bool,
|
attach: bool,
|
||||||
#[arg(
|
#[arg(
|
||||||
short = 'W',
|
short = 'W',
|
||||||
long,
|
long,
|
||||||
help = "Wait for the guest to start, implied by --attach"
|
help = "Wait for the zone to start, implied by --attach"
|
||||||
)]
|
)]
|
||||||
wait: bool,
|
wait: bool,
|
||||||
#[arg(short = 'k', long, help = "OCI kernel image for guest to use")]
|
#[arg(short = 'k', long, help = "OCI kernel image for zone to use")]
|
||||||
kernel: Option<String>,
|
kernel: Option<String>,
|
||||||
#[arg(short = 'I', long, help = "OCI initrd image for guest to use")]
|
#[arg(short = 'I', long, help = "OCI initrd image for zone to use")]
|
||||||
initrd: Option<String>,
|
initrd: Option<String>,
|
||||||
#[arg(short = 'w', long, help = "Working directory")]
|
#[arg(short = 'w', long, help = "Working directory")]
|
||||||
working_directory: Option<String>,
|
working_directory: Option<String>,
|
||||||
#[arg(help = "Container image for guest to use")]
|
#[arg(help = "Container image for zone to use")]
|
||||||
oci: String,
|
oci: String,
|
||||||
#[arg(
|
#[arg(
|
||||||
allow_hyphen_values = true,
|
allow_hyphen_values = true,
|
||||||
trailing_var_arg = true,
|
trailing_var_arg = true,
|
||||||
help = "Command to run inside the guest"
|
help = "Command to run inside the zone"
|
||||||
)]
|
)]
|
||||||
command: Vec<String>,
|
command: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LaunchCommand {
|
impl ZoneLaunchCommand {
|
||||||
pub async fn run(
|
pub async fn run(
|
||||||
self,
|
self,
|
||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
@ -117,18 +112,18 @@ impl LaunchCommand {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let request = CreateGuestRequest {
|
let request = CreateZoneRequest {
|
||||||
spec: Some(GuestSpec {
|
spec: Some(ZoneSpec {
|
||||||
name: self.name.unwrap_or_default(),
|
name: self.name.unwrap_or_default(),
|
||||||
image: Some(image),
|
image: Some(image),
|
||||||
kernel,
|
kernel,
|
||||||
initrd,
|
initrd,
|
||||||
vcpus: self.cpus,
|
vcpus: self.cpus,
|
||||||
mem: self.mem,
|
mem: self.mem,
|
||||||
task: Some(GuestTaskSpec {
|
task: Some(ZoneTaskSpec {
|
||||||
environment: env_map(&self.env.unwrap_or_default())
|
environment: env_map(&self.env.unwrap_or_default())
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(key, value)| GuestTaskSpecEnvVar {
|
.map(|(key, value)| ZoneTaskSpecEnvVar {
|
||||||
key: key.clone(),
|
key: key.clone(),
|
||||||
value: value.clone(),
|
value: value.clone(),
|
||||||
})
|
})
|
||||||
@ -140,26 +135,26 @@ impl LaunchCommand {
|
|||||||
devices: self
|
devices: self
|
||||||
.device
|
.device
|
||||||
.iter()
|
.iter()
|
||||||
.map(|name| GuestSpecDevice { name: name.clone() })
|
.map(|name| ZoneSpecDevice { name: name.clone() })
|
||||||
.collect(),
|
.collect(),
|
||||||
}),
|
}),
|
||||||
};
|
};
|
||||||
let response = client
|
let response = client
|
||||||
.create_guest(Request::new(request))
|
.create_zone(Request::new(request))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
let id = response.guest_id;
|
let id = response.zone_id;
|
||||||
|
|
||||||
if self.wait || self.attach {
|
if self.wait || self.attach {
|
||||||
wait_guest_started(&id, events.clone()).await?;
|
wait_zone_started(&id, events.clone()).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let code = if self.attach {
|
let code = if self.attach {
|
||||||
let input = StdioConsoleStream::stdin_stream(id.clone()).await;
|
let input = StdioConsoleStream::stdin_stream(id.clone()).await;
|
||||||
let output = client.console_data(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
||||||
let exit_hook_task = StdioConsoleStream::guest_exit_hook(id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(id.clone(), events).await?;
|
||||||
select! {
|
select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
x??;
|
x??;
|
||||||
@ -180,7 +175,7 @@ impl LaunchCommand {
|
|||||||
client: &mut ControlServiceClient<Channel>,
|
client: &mut ControlServiceClient<Channel>,
|
||||||
image: &str,
|
image: &str,
|
||||||
format: OciImageFormat,
|
format: OciImageFormat,
|
||||||
) -> Result<GuestImageSpec> {
|
) -> Result<ZoneImageSpec> {
|
||||||
let response = client
|
let response = client
|
||||||
.pull_image(PullImageRequest {
|
.pull_image(PullImageRequest {
|
||||||
image: image.to_string(),
|
image: image.to_string(),
|
||||||
@ -189,8 +184,8 @@ impl LaunchCommand {
|
|||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
let reply = pull_interactive_progress(response.into_inner()).await?;
|
let reply = pull_interactive_progress(response.into_inner()).await?;
|
||||||
Ok(GuestImageSpec {
|
Ok(ZoneImageSpec {
|
||||||
image: Some(Image::Oci(GuestOciImageSpec {
|
image: Some(Image::Oci(ZoneOciImageSpec {
|
||||||
digest: reply.digest,
|
digest: reply.digest,
|
||||||
format: reply.format,
|
format: reply.format,
|
||||||
})),
|
})),
|
||||||
@ -198,38 +193,38 @@ impl LaunchCommand {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_guest_started(id: &str, events: EventStream) -> Result<()> {
|
async fn wait_zone_started(id: &str, events: EventStream) -> Result<()> {
|
||||||
let mut stream = events.subscribe();
|
let mut stream = events.subscribe();
|
||||||
while let Ok(event) = stream.recv().await {
|
while let Ok(event) = stream.recv().await {
|
||||||
match event {
|
match event {
|
||||||
Event::GuestChanged(changed) => {
|
Event::ZoneChanged(changed) => {
|
||||||
let Some(guest) = changed.guest else {
|
let Some(zone) = changed.zone else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if guest.id != id {
|
if zone.id != id {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(state) = guest.state else {
|
let Some(state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ref error) = state.error_info {
|
if let Some(ref error) = state.error_info {
|
||||||
if state.status() == GuestStatus::Failed {
|
if state.status() == ZoneStatus::Failed {
|
||||||
error!("launch failed: {}", error.message);
|
error!("launch failed: {}", error.message);
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
} else {
|
} else {
|
||||||
error!("guest error: {}", error.message);
|
error!("zone error: {}", error.message);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == GuestStatus::Destroyed {
|
if state.status() == ZoneStatus::Destroyed {
|
||||||
error!("guest destroyed");
|
error!("zone destroyed");
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == GuestStatus::Started {
|
if state.status() == ZoneStatus::Started {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -4,9 +4,9 @@ use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{Guest, GuestStatus},
|
common::{Zone, ZoneStatus},
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, ListGuestsRequest, ResolveGuestRequest,
|
control_service_client::ControlServiceClient, ListZonesRequest, ResolveZoneRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -14,10 +14,10 @@ use krata::{
|
|||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::format::{guest_simple_line, guest_status_text, kv2line, proto2dynamic, proto2kv};
|
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line, zone_status_text};
|
||||||
|
|
||||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
enum ListFormat {
|
enum ZoneListFormat {
|
||||||
Table,
|
Table,
|
||||||
Json,
|
Json,
|
||||||
JsonPretty,
|
JsonPretty,
|
||||||
@ -28,41 +28,39 @@ enum ListFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "List the guests on the isolation engine")]
|
#[command(about = "List the zones on the isolation engine")]
|
||||||
pub struct ListCommand {
|
pub struct ZoneListCommand {
|
||||||
#[arg(short, long, default_value = "table", help = "Output format")]
|
#[arg(short, long, default_value = "table", help = "Output format")]
|
||||||
format: ListFormat,
|
format: ZoneListFormat,
|
||||||
#[arg(help = "Limit to a single guest, either the name or the uuid")]
|
#[arg(help = "Limit to a single zone, either the name or the uuid")]
|
||||||
guest: Option<String>,
|
zone: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ListCommand {
|
impl ZoneListCommand {
|
||||||
pub async fn run(
|
pub async fn run(
|
||||||
self,
|
self,
|
||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
_events: EventStream,
|
_events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut guests = if let Some(ref guest) = self.guest {
|
let mut zones = if let Some(ref zone) = self.zone {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_guest(Request::new(ResolveGuestRequest {
|
.resolve_zone(Request::new(ResolveZoneRequest { name: zone.clone() }))
|
||||||
name: guest.clone(),
|
|
||||||
}))
|
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
if let Some(guest) = reply.guest {
|
if let Some(zone) = reply.zone {
|
||||||
vec![guest]
|
vec![zone]
|
||||||
} else {
|
} else {
|
||||||
return Err(anyhow!("unable to resolve guest '{}'", guest));
|
return Err(anyhow!("unable to resolve zone '{}'", zone));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
client
|
client
|
||||||
.list_guests(Request::new(ListGuestsRequest {}))
|
.list_zones(Request::new(ListZonesRequest {}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner()
|
.into_inner()
|
||||||
.guests
|
.zones
|
||||||
};
|
};
|
||||||
|
|
||||||
guests.sort_by(|a, b| {
|
zones.sort_by(|a, b| {
|
||||||
a.spec
|
a.spec
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|x| x.name.as_str())
|
.map(|x| x.name.as_str())
|
||||||
@ -71,26 +69,26 @@ impl ListCommand {
|
|||||||
});
|
});
|
||||||
|
|
||||||
match self.format {
|
match self.format {
|
||||||
ListFormat::Table => {
|
ZoneListFormat::Table => {
|
||||||
self.print_guest_table(guests)?;
|
self.print_zone_table(zones)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
ListFormat::Simple => {
|
ZoneListFormat::Simple => {
|
||||||
for guest in guests {
|
for zone in zones {
|
||||||
println!("{}", guest_simple_line(&guest));
|
println!("{}", zone_simple_line(&zone));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ListFormat::Json | ListFormat::JsonPretty | ListFormat::Yaml => {
|
ZoneListFormat::Json | ZoneListFormat::JsonPretty | ZoneListFormat::Yaml => {
|
||||||
let mut values = Vec::new();
|
let mut values = Vec::new();
|
||||||
for guest in guests {
|
for zone in zones {
|
||||||
let message = proto2dynamic(guest)?;
|
let message = proto2dynamic(zone)?;
|
||||||
values.push(serde_json::to_value(message)?);
|
values.push(serde_json::to_value(message)?);
|
||||||
}
|
}
|
||||||
let value = Value::Array(values);
|
let value = Value::Array(values);
|
||||||
-                let encoded = if self.format == ListFormat::JsonPretty {
+                let encoded = if self.format == ZoneListFormat::JsonPretty {
                     serde_json::to_string_pretty(&value)?
-                } else if self.format == ListFormat::Yaml {
+                } else if self.format == ZoneListFormat::Yaml {
                     serde_yaml::to_string(&value)?
                 } else {
                     serde_json::to_string(&value)?
@@ -98,65 +96,63 @@ impl ListCommand {
                 println!("{}", encoded.trim());
             }

-            ListFormat::Jsonl => {
-                for guest in guests {
-                    let message = proto2dynamic(guest)?;
+            ZoneListFormat::Jsonl => {
+                for zone in zones {
+                    let message = proto2dynamic(zone)?;
                     println!("{}", serde_json::to_string(&message)?);
                 }
             }

-            ListFormat::KeyValue => {
-                self.print_key_value(guests)?;
+            ZoneListFormat::KeyValue => {
+                self.print_key_value(zones)?;
             }
         }

         Ok(())
     }

-    fn print_guest_table(&self, guests: Vec<Guest>) -> Result<()> {
+    fn print_zone_table(&self, zones: Vec<Zone>) -> Result<()> {
         let mut table = Table::new();
         table.load_preset(UTF8_FULL_CONDENSED);
         table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
         table.set_header(vec!["name", "uuid", "status", "ipv4", "ipv6"]);
-        for guest in guests {
-            let ipv4 = guest
+        for zone in zones {
+            let ipv4 = zone
                 .state
                 .as_ref()
                 .and_then(|x| x.network.as_ref())
-                .map(|x| x.guest_ipv4.as_str())
+                .map(|x| x.zone_ipv4.as_str())
                 .unwrap_or("n/a");
-            let ipv6 = guest
+            let ipv6 = zone
                 .state
                 .as_ref()
                 .and_then(|x| x.network.as_ref())
-                .map(|x| x.guest_ipv6.as_str())
+                .map(|x| x.zone_ipv6.as_str())
                 .unwrap_or("n/a");
-            let Some(spec) = guest.spec else {
+            let Some(spec) = zone.spec else {
                 continue;
             };
-            let status = guest.state.as_ref().cloned().unwrap_or_default().status();
-            let status_text = guest_status_text(status);
+            let status = zone.state.as_ref().cloned().unwrap_or_default().status();
+            let status_text = zone_status_text(status);

             let status_color = match status {
-                GuestStatus::Destroyed | GuestStatus::Failed => Color::Red,
-                GuestStatus::Destroying | GuestStatus::Exited | GuestStatus::Starting => {
-                    Color::Yellow
-                }
-                GuestStatus::Started => Color::Green,
+                ZoneStatus::Destroyed | ZoneStatus::Failed => Color::Red,
+                ZoneStatus::Destroying | ZoneStatus::Exited | ZoneStatus::Starting => Color::Yellow,
+                ZoneStatus::Started => Color::Green,
                 _ => Color::Reset,
             };

             table.add_row(vec![
                 Cell::new(spec.name),
-                Cell::new(guest.id),
+                Cell::new(zone.id),
                 Cell::new(status_text).fg(status_color),
                 Cell::new(ipv4.to_string()),
                 Cell::new(ipv6.to_string()),
             ]);
         }
         if table.is_empty() {
-            if self.guest.is_none() {
-                println!("no guests have been launched");
+            if self.zone.is_none() {
+                println!("no zones have been launched");
             }
         } else {
             println!("{}", table);
@@ -164,9 +160,9 @@ impl ListCommand {
         Ok(())
     }

-    fn print_key_value(&self, guests: Vec<Guest>) -> Result<()> {
-        for guest in guests {
-            let kvs = proto2kv(guest)?;
+    fn print_key_value(&self, zones: Vec<Zone>) -> Result<()> {
+        for zone in zones {
+            let kvs = proto2kv(zone)?;
             println!("{}", kv2line(kvs),);
         }
         Ok(())
@@ -3,7 +3,7 @@ use async_stream::stream;
 use clap::Parser;
 use krata::{
     events::EventStream,
-    v1::control::{control_service_client::ControlServiceClient, ConsoleDataRequest},
+    v1::control::{control_service_client::ControlServiceClient, ZoneConsoleRequest},
 };

 use tokio::select;
@@ -12,39 +12,39 @@ use tonic::transport::Channel;

 use crate::console::StdioConsoleStream;

-use super::resolve_guest;
+use crate::cli::resolve_zone;

 #[derive(Parser)]
-#[command(about = "View the logs of a guest")]
-pub struct LogsCommand {
-    #[arg(short, long, help = "Follow output from the guest")]
+#[command(about = "View the logs of a zone")]
+pub struct ZoneLogsCommand {
+    #[arg(short, long, help = "Follow output from the zone")]
     follow: bool,
-    #[arg(help = "Guest to show logs for, either the name or the uuid")]
-    guest: String,
+    #[arg(help = "Zone to show logs for, either the name or the uuid")]
+    zone: String,
 }

-impl LogsCommand {
+impl ZoneLogsCommand {
     pub async fn run(
         self,
         mut client: ControlServiceClient<Channel>,
         events: EventStream,
     ) -> Result<()> {
-        let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
-        let guest_id_stream = guest_id.clone();
+        let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
+        let zone_id_stream = zone_id.clone();
         let follow = self.follow;
         let input = stream! {
-            yield ConsoleDataRequest { guest_id: guest_id_stream, data: Vec::new() };
+            yield ZoneConsoleRequest { zone_id: zone_id_stream, data: Vec::new() };
             if follow {
-                let mut pending = pending::<ConsoleDataRequest>();
+                let mut pending = pending::<ZoneConsoleRequest>();
                 while let Some(x) = pending.next().await {
                     yield x;
                 }
             }
         };
-        let output = client.console_data(input).await?.into_inner();
+        let output = client.attach_zone_console(input).await?.into_inner();
         let stdout_handle =
             tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
-        let exit_hook_task = StdioConsoleStream::guest_exit_hook(guest_id.clone(), events).await?;
+        let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
         let code = select! {
             x = stdout_handle => {
                 x??;
@@ -3,8 +3,8 @@ use clap::{Parser, ValueEnum};
 use krata::{
     events::EventStream,
     v1::{
-        common::GuestMetricNode,
-        control::{control_service_client::ControlServiceClient, ReadGuestMetricsRequest},
+        common::ZoneMetricNode,
+        control::{control_service_client::ControlServiceClient, ReadZoneMetricsRequest},
     },
 };

@@ -12,10 +12,10 @@ use tonic::transport::Channel;

 use crate::format::{kv2line, metrics_flat, metrics_tree, proto2dynamic};

-use super::resolve_guest;
+use crate::cli::resolve_zone;

 #[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
-enum MetricsFormat {
+enum ZoneMetricsFormat {
     Tree,
     Json,
     JsonPretty,
@@ -24,37 +24,37 @@ enum MetricsFormat {
 }

 #[derive(Parser)]
-#[command(about = "Read metrics from the guest")]
-pub struct MetricsCommand {
+#[command(about = "Read metrics from the zone")]
+pub struct ZoneMetricsCommand {
     #[arg(short, long, default_value = "tree", help = "Output format")]
-    format: MetricsFormat,
-    #[arg(help = "Guest to read metrics for, either the name or the uuid")]
-    guest: String,
+    format: ZoneMetricsFormat,
+    #[arg(help = "Zone to read metrics for, either the name or the uuid")]
+    zone: String,
 }

-impl MetricsCommand {
+impl ZoneMetricsCommand {
     pub async fn run(
         self,
         mut client: ControlServiceClient<Channel>,
         _events: EventStream,
     ) -> Result<()> {
-        let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
+        let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
         let root = client
-            .read_guest_metrics(ReadGuestMetricsRequest { guest_id })
+            .read_zone_metrics(ReadZoneMetricsRequest { zone_id })
             .await?
             .into_inner()
             .root
             .unwrap_or_default();
         match self.format {
-            MetricsFormat::Tree => {
+            ZoneMetricsFormat::Tree => {
                 self.print_metrics_tree(root)?;
             }

-            MetricsFormat::Json | MetricsFormat::JsonPretty | MetricsFormat::Yaml => {
+            ZoneMetricsFormat::Json | ZoneMetricsFormat::JsonPretty | ZoneMetricsFormat::Yaml => {
                 let value = serde_json::to_value(proto2dynamic(root)?)?;
-                let encoded = if self.format == MetricsFormat::JsonPretty {
+                let encoded = if self.format == ZoneMetricsFormat::JsonPretty {
                     serde_json::to_string_pretty(&value)?
-                } else if self.format == MetricsFormat::Yaml {
+                } else if self.format == ZoneMetricsFormat::Yaml {
                     serde_yaml::to_string(&value)?
                 } else {
                     serde_json::to_string(&value)?
@@ -62,7 +62,7 @@ impl MetricsCommand {
                 println!("{}", encoded.trim());
             }

-            MetricsFormat::KeyValue => {
+            ZoneMetricsFormat::KeyValue => {
                 self.print_key_value(root)?;
             }
         }
@@ -70,12 +70,12 @@ impl MetricsCommand {
         Ok(())
     }

-    fn print_metrics_tree(&self, root: GuestMetricNode) -> Result<()> {
+    fn print_metrics_tree(&self, root: ZoneMetricNode) -> Result<()> {
         print!("{}", metrics_tree(root));
         Ok(())
     }

-    fn print_key_value(&self, metrics: GuestMetricNode) -> Result<()> {
+    fn print_key_value(&self, metrics: ZoneMetricNode) -> Result<()> {
         let kvs = metrics_flat(metrics);
         println!("{}", kv2line(kvs));
         Ok(())
crates/ctl/src/cli/zone/mod.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
+use anyhow::Result;
+use clap::{Parser, Subcommand};
+use tonic::transport::Channel;
+
+use krata::events::EventStream;
+use krata::v1::control::control_service_client::ControlServiceClient;
+
+use crate::cli::zone::attach::ZoneAttachCommand;
+use crate::cli::zone::destroy::ZoneDestroyCommand;
+use crate::cli::zone::exec::ZoneExecCommand;
+use crate::cli::zone::launch::ZoneLaunchCommand;
+use crate::cli::zone::list::ZoneListCommand;
+use crate::cli::zone::logs::ZoneLogsCommand;
+use crate::cli::zone::metrics::ZoneMetricsCommand;
+use crate::cli::zone::resolve::ZoneResolveCommand;
+use crate::cli::zone::top::ZoneTopCommand;
+use crate::cli::zone::watch::ZoneWatchCommand;
+
+pub mod attach;
+pub mod destroy;
+pub mod exec;
+pub mod launch;
+pub mod list;
+pub mod logs;
+pub mod metrics;
+pub mod resolve;
+pub mod top;
+pub mod watch;
+
+#[derive(Parser)]
+#[command(about = "Manage the zones on the isolation engine")]
+pub struct ZoneCommand {
+    #[command(subcommand)]
+    subcommand: ZoneCommands,
+}
+
+impl ZoneCommand {
+    pub async fn run(
+        self,
+        client: ControlServiceClient<Channel>,
+        events: EventStream,
+    ) -> Result<()> {
+        self.subcommand.run(client, events).await
+    }
+}
+
+#[derive(Subcommand)]
+pub enum ZoneCommands {
+    Attach(ZoneAttachCommand),
+    List(ZoneListCommand),
+    Launch(ZoneLaunchCommand),
+    Destroy(ZoneDestroyCommand),
+    Exec(ZoneExecCommand),
+    Logs(ZoneLogsCommand),
+    Metrics(ZoneMetricsCommand),
+    Resolve(ZoneResolveCommand),
+    Top(ZoneTopCommand),
+    Watch(ZoneWatchCommand),
+}
+
+impl ZoneCommands {
+    pub async fn run(
+        self,
+        client: ControlServiceClient<Channel>,
+        events: EventStream,
+    ) -> Result<()> {
+        match self {
+            ZoneCommands::Launch(launch) => launch.run(client, events).await,
+
+            ZoneCommands::Destroy(destroy) => destroy.run(client, events).await,
+
+            ZoneCommands::Attach(attach) => attach.run(client, events).await,
+
+            ZoneCommands::Logs(logs) => logs.run(client, events).await,
+
+            ZoneCommands::List(list) => list.run(client, events).await,
+
+            ZoneCommands::Watch(watch) => watch.run(events).await,
+
+            ZoneCommands::Resolve(resolve) => resolve.run(client).await,
+
+            ZoneCommands::Metrics(metrics) => metrics.run(client, events).await,
+
+            ZoneCommands::Top(top) => top.run(client, events).await,
+
+            ZoneCommands::Exec(exec) => exec.run(client).await,
+        }
+    }
+}
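The new mod.rs above routes every zone operation through one clap Subcommand enum. The parent CLI that mounts ZoneCommand is not part of this hunk, so the following is only a minimal standalone sketch of the same Parser/Subcommand dispatch pattern; ExampleCli, ExampleCommands, Hello and Version are hypothetical stand-ins used purely for illustration.

    use clap::{Parser, Subcommand};

    // Hypothetical top-level parser, mirroring how ZoneCommand nests ZoneCommands.
    #[derive(Parser)]
    #[command(about = "Example of the nested subcommand pattern used by ZoneCommand")]
    struct ExampleCli {
        #[command(subcommand)]
        subcommand: ExampleCommands,
    }

    // Hypothetical subcommand enum standing in for ZoneCommands.
    #[derive(Subcommand)]
    enum ExampleCommands {
        /// Print a greeting.
        Hello { name: String },
        /// Print a fake version string.
        Version,
    }

    fn main() {
        let cli = ExampleCli::parse();
        // Each variant dispatches to its own handler, just like ZoneCommands::run.
        match cli.subcommand {
            ExampleCommands::Hello { name } => println!("hello, {}", name),
            ExampleCommands::Version => println!("0.0.0-example"),
        }
    }

With this shape, `example hello world` and `example version` parse and dispatch the same way `kratactl`'s zone subcommands do through ZoneCommands::run.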
@@ -1,26 +1,26 @@
 use anyhow::Result;
 use clap::Parser;
-use krata::v1::control::{control_service_client::ControlServiceClient, ResolveGuestRequest};
+use krata::v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest};

 use tonic::{transport::Channel, Request};

 #[derive(Parser)]
-#[command(about = "Resolve a guest name to a uuid")]
-pub struct ResolveCommand {
-    #[arg(help = "Guest name")]
-    guest: String,
+#[command(about = "Resolve a zone name to a uuid")]
+pub struct ZoneResolveCommand {
+    #[arg(help = "Zone name")]
+    zone: String,
 }

-impl ResolveCommand {
+impl ZoneResolveCommand {
     pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
         let reply = client
-            .resolve_guest(Request::new(ResolveGuestRequest {
-                name: self.guest.clone(),
+            .resolve_zone(Request::new(ResolveZoneRequest {
+                name: self.zone.clone(),
             }))
             .await?
             .into_inner();
-        if let Some(guest) = reply.guest {
-            println!("{}", guest.id);
+        if let Some(zone) = reply.zone {
+            println!("{}", zone.id);
         } else {
             std::process::exit(1);
         }
@@ -24,19 +24,19 @@ use ratatui::{
 };

 use crate::{
-    format::guest_status_text,
+    format::zone_status_text,
     metrics::{
         lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
     },
 };

 #[derive(Parser)]
-#[command(about = "Dashboard for running guests")]
-pub struct TopCommand {}
+#[command(about = "Dashboard for running zones")]
+pub struct ZoneTopCommand {}

 pub type Tui = Terminal<CrosstermBackend<Stdout>>;

-impl TopCommand {
+impl ZoneTopCommand {
     pub async fn run(
         self,
         client: ControlServiceClient<Channel>,
@@ -44,14 +44,14 @@ impl TopCommand {
     ) -> Result<()> {
         let collector = MultiMetricCollector::new(client, events, Duration::from_millis(200))?;
         let collector = collector.launch().await?;
-        let mut tui = TopCommand::init()?;
-        let mut app = TopApp {
-            metrics: MultiMetricState { guests: vec![] },
+        let mut tui = ZoneTopCommand::init()?;
+        let mut app = ZoneTopApp {
+            metrics: MultiMetricState { zones: vec![] },
             exit: false,
             table: TableState::new(),
         };
         app.run(collector, &mut tui).await?;
-        TopCommand::restore()?;
+        ZoneTopCommand::restore()?;
         Ok(())
     }

@@ -68,13 +68,13 @@ impl TopCommand {
     }
 }

-pub struct TopApp {
+pub struct ZoneTopApp {
     table: TableState,
     metrics: MultiMetricState,
     exit: bool,
 }

-impl TopApp {
+impl ZoneTopApp {
     pub async fn run(
         &mut self,
         mut collector: MultiMetricCollectorHandle,
@@ -136,7 +136,7 @@ impl TopApp {
     }
 }

-impl Widget for &mut TopApp {
+impl Widget for &mut ZoneTopApp {
     fn render(self, area: Rect, buf: &mut Buffer) {
         let title = Title::from(" krata isolation engine ".bold());
         let instructions = Title::from(vec![" Quit ".into(), "<Q> ".blue().bold()]);
@@ -152,12 +152,12 @@ impl Widget for &mut TopApp {

         let mut rows = vec![];

-        for ms in &self.metrics.guests {
-            let Some(ref spec) = ms.guest.spec else {
+        for ms in &self.metrics.zones {
+            let Some(ref spec) = ms.zone.spec else {
                 continue;
             };

-            let Some(ref state) = ms.guest.state else {
+            let Some(ref state) = ms.zone.state else {
                 continue;
             };

@@ -176,8 +176,8 @@ impl Widget for &mut TopApp {

             let row = Row::new(vec![
                 spec.name.clone(),
-                ms.guest.id.clone(),
-                guest_status_text(state.status()),
+                ms.zone.id.clone(),
+                zone_status_text(state.status()),
                 memory_total.unwrap_or_default(),
                 memory_used.unwrap_or_default(),
                 memory_free.unwrap_or_default(),
@@ -2,53 +2,48 @@ use anyhow::Result;
 use clap::{Parser, ValueEnum};
 use krata::{
     events::EventStream,
-    v1::{common::Guest, control::watch_events_reply::Event},
+    v1::{common::Zone, control::watch_events_reply::Event},
 };
 use prost_reflect::ReflectMessage;
 use serde_json::Value;

-use crate::format::{guest_simple_line, kv2line, proto2dynamic, proto2kv};
+use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line};

 #[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
-enum WatchFormat {
+enum ZoneWatchFormat {
     Simple,
     Json,
     KeyValue,
 }

 #[derive(Parser)]
-#[command(about = "Watch for guest changes")]
-pub struct WatchCommand {
+#[command(about = "Watch for zone changes")]
+pub struct ZoneWatchCommand {
     #[arg(short, long, default_value = "simple", help = "Output format")]
-    format: WatchFormat,
+    format: ZoneWatchFormat,
 }

-impl WatchCommand {
+impl ZoneWatchCommand {
     pub async fn run(self, events: EventStream) -> Result<()> {
         let mut stream = events.subscribe();
         loop {
             let event = stream.recv().await?;

-            let Event::GuestChanged(changed) = event;
-            let guest = changed.guest.clone();
-            self.print_event("guest.changed", changed, guest)?;
+            let Event::ZoneChanged(changed) = event;
+            let zone = changed.zone.clone();
+            self.print_event("zone.changed", changed, zone)?;
         }
     }

-    fn print_event(
-        &self,
-        typ: &str,
-        event: impl ReflectMessage,
-        guest: Option<Guest>,
-    ) -> Result<()> {
+    fn print_event(&self, typ: &str, event: impl ReflectMessage, zone: Option<Zone>) -> Result<()> {
         match self.format {
-            WatchFormat::Simple => {
-                if let Some(guest) = guest {
-                    println!("{}", guest_simple_line(&guest));
+            ZoneWatchFormat::Simple => {
+                if let Some(zone) = zone {
+                    println!("{}", zone_simple_line(&zone));
                 }
             }

-            WatchFormat::Json => {
+            ZoneWatchFormat::Json => {
                 let message = proto2dynamic(event)?;
                 let mut value = serde_json::to_value(&message)?;
                 if let Value::Object(ref mut map) = value {
@@ -57,7 +52,7 @@ impl WatchCommand {
                 println!("{}", serde_json::to_string(&value)?);
             }

-            WatchFormat::KeyValue => {
+            ZoneWatchFormat::KeyValue => {
                 let mut map = proto2kv(event)?;
                 map.insert("event.type".to_string(), typ.to_string());
                 println!("{}", kv2line(map),);
@@ -7,10 +7,10 @@ use crossterm::{
 use krata::{
     events::EventStream,
     v1::{
-        common::GuestStatus,
+        common::ZoneStatus,
         control::{
-            watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest, ExecGuestReply,
-            ExecGuestRequest,
+            watch_events_reply::Event, ExecZoneReply, ExecZoneRequest, ZoneConsoleReply,
+            ZoneConsoleRequest,
         },
     },
 };
@@ -25,10 +25,10 @@ use tonic::Streaming;
 pub struct StdioConsoleStream;

 impl StdioConsoleStream {
-    pub async fn stdin_stream(guest: String) -> impl Stream<Item = ConsoleDataRequest> {
+    pub async fn stdin_stream(zone: String) -> impl Stream<Item = ZoneConsoleRequest> {
         let mut stdin = stdin();
         stream! {
-            yield ConsoleDataRequest { guest_id: guest, data: vec![] };
+            yield ZoneConsoleRequest { zone_id: zone, data: vec![] };

             let mut buffer = vec![0u8; 60];
             loop {
@@ -43,14 +43,14 @@ impl StdioConsoleStream {
                 if size == 1 && buffer[0] == 0x1d {
                     break;
                 }
-                yield ConsoleDataRequest { guest_id: String::default(), data };
+                yield ZoneConsoleRequest { zone_id: String::default(), data };
             }
         }
     }

     pub async fn stdin_stream_exec(
-        initial: ExecGuestRequest,
-    ) -> impl Stream<Item = ExecGuestRequest> {
+        initial: ExecZoneRequest,
+    ) -> impl Stream<Item = ExecZoneRequest> {
         let mut stdin = stdin();
         stream! {
             yield initial;
@@ -68,12 +68,12 @@ impl StdioConsoleStream {
                 if size == 1 && buffer[0] == 0x1d {
                     break;
                 }
-                yield ExecGuestRequest { guest_id: String::default(), task: None, data };
+                yield ExecZoneRequest { zone_id: String::default(), task: None, data };
             }
         }
     }

-    pub async fn stdout(mut stream: Streaming<ConsoleDataReply>) -> Result<()> {
+    pub async fn stdout(mut stream: Streaming<ZoneConsoleReply>) -> Result<()> {
         if stdin().is_tty() {
             enable_raw_mode()?;
             StdioConsoleStream::register_terminal_restore_hook()?;
@@ -90,7 +90,7 @@ impl StdioConsoleStream {
         Ok(())
     }

-    pub async fn exec_output(mut stream: Streaming<ExecGuestReply>) -> Result<i32> {
+    pub async fn exec_output(mut stream: Streaming<ExecZoneReply>) -> Result<i32> {
         let mut stdout = stdout();
         let mut stderr = stderr();
         while let Some(reply) = stream.next().await {
@@ -106,33 +106,33 @@ impl StdioConsoleStream {
             }

             if reply.exited {
-                if reply.error.is_empty() {
-                    return Ok(reply.exit_code);
+                return if reply.error.is_empty() {
+                    Ok(reply.exit_code)
                 } else {
-                    return Err(anyhow!("exec failed: {}", reply.error));
-                }
+                    Err(anyhow!("exec failed: {}", reply.error))
+                };
             }
         }
         Ok(-1)
     }

-    pub async fn guest_exit_hook(
+    pub async fn zone_exit_hook(
         id: String,
         events: EventStream,
     ) -> Result<JoinHandle<Option<i32>>> {
         Ok(tokio::task::spawn(async move {
             let mut stream = events.subscribe();
             while let Ok(event) = stream.recv().await {
-                let Event::GuestChanged(changed) = event;
-                let Some(guest) = changed.guest else {
+                let Event::ZoneChanged(changed) = event;
+                let Some(zone) = changed.zone else {
                     continue;
                 };

-                let Some(state) = guest.state else {
+                let Some(state) = zone.state else {
                     continue;
                 };

-                if guest.id != id {
+                if zone.id != id {
                     continue;
                 }

@@ -141,7 +141,7 @@ impl StdioConsoleStream {
                 }

                 let status = state.status();
-                if status == GuestStatus::Destroying || status == GuestStatus::Destroyed {
+                if status == ZoneStatus::Destroying || status == ZoneStatus::Destroyed {
                     return Some(10);
                 }
             }
@@ -3,7 +3,7 @@ use std::{collections::HashMap, time::Duration};
 use anyhow::Result;
 use fancy_duration::FancyDuration;
 use human_bytes::human_bytes;
-use krata::v1::common::{Guest, GuestMetricFormat, GuestMetricNode, GuestStatus};
+use krata::v1::common::{Zone, ZoneMetricFormat, ZoneMetricNode, ZoneStatus};
 use prost_reflect::{DynamicMessage, ReflectMessage};
 use prost_types::Value;
 use termtree::Tree;
@@ -75,32 +75,31 @@ pub fn kv2line(map: HashMap<String, String>) -> String {
         .join(" ")
 }

-pub fn guest_status_text(status: GuestStatus) -> String {
+pub fn zone_status_text(status: ZoneStatus) -> String {
     match status {
-        GuestStatus::Starting => "starting",
-        GuestStatus::Started => "started",
-        GuestStatus::Destroying => "destroying",
-        GuestStatus::Destroyed => "destroyed",
-        GuestStatus::Exited => "exited",
-        GuestStatus::Failed => "failed",
+        ZoneStatus::Starting => "starting",
+        ZoneStatus::Started => "started",
+        ZoneStatus::Destroying => "destroying",
+        ZoneStatus::Destroyed => "destroyed",
+        ZoneStatus::Exited => "exited",
+        ZoneStatus::Failed => "failed",
         _ => "unknown",
     }
     .to_string()
 }

-pub fn guest_simple_line(guest: &Guest) -> String {
-    let state = guest_status_text(
-        guest
-            .state
+pub fn zone_simple_line(zone: &Zone) -> String {
+    let state = zone_status_text(
+        zone.state
             .as_ref()
             .map(|x| x.status())
-            .unwrap_or(GuestStatus::Unknown),
+            .unwrap_or(ZoneStatus::Unknown),
     );
-    let name = guest.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
-    let network = guest.state.as_ref().and_then(|x| x.network.as_ref());
-    let ipv4 = network.map(|x| x.guest_ipv4.as_str()).unwrap_or("");
-    let ipv6 = network.map(|x| x.guest_ipv6.as_str()).unwrap_or("");
-    format!("{}\t{}\t{}\t{}\t{}", guest.id, state, name, ipv4, ipv6)
+    let name = zone.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
+    let network = zone.state.as_ref().and_then(|x| x.network.as_ref());
+    let ipv4 = network.map(|x| x.zone_ipv4.as_str()).unwrap_or("");
+    let ipv6 = network.map(|x| x.zone_ipv6.as_str()).unwrap_or("");
+    format!("{}\t{}\t{}\t{}\t{}", zone.id, state, name, ipv4, ipv6)
 }

 fn metrics_value_string(value: Value) -> String {
@@ -116,18 +115,18 @@ fn metrics_value_numeric(value: Value) -> f64 {
     string.parse::<f64>().ok().unwrap_or(f64::NAN)
 }

-pub fn metrics_value_pretty(value: Value, format: GuestMetricFormat) -> String {
+pub fn metrics_value_pretty(value: Value, format: ZoneMetricFormat) -> String {
     match format {
-        GuestMetricFormat::Bytes => human_bytes(metrics_value_numeric(value)),
-        GuestMetricFormat::Integer => (metrics_value_numeric(value) as u64).to_string(),
-        GuestMetricFormat::DurationSeconds => {
+        ZoneMetricFormat::Bytes => human_bytes(metrics_value_numeric(value)),
+        ZoneMetricFormat::Integer => (metrics_value_numeric(value) as u64).to_string(),
+        ZoneMetricFormat::DurationSeconds => {
             FancyDuration(Duration::from_secs_f64(metrics_value_numeric(value))).to_string()
         }
         _ => metrics_value_string(value),
     }
 }

-fn metrics_flat_internal(prefix: &str, node: GuestMetricNode, map: &mut HashMap<String, String>) {
+fn metrics_flat_internal(prefix: &str, node: ZoneMetricNode, map: &mut HashMap<String, String>) {
     if let Some(value) = node.value {
         map.insert(prefix.to_string(), metrics_value_string(value));
     }
@@ -142,13 +141,13 @@ fn metrics_flat_internal(prefix: &str, node: GuestMetricNode, map: &mut HashMap<
     }
 }

-pub fn metrics_flat(root: GuestMetricNode) -> HashMap<String, String> {
+pub fn metrics_flat(root: ZoneMetricNode) -> HashMap<String, String> {
     let mut map = HashMap::new();
     metrics_flat_internal("", root, &mut map);
     map
 }

-pub fn metrics_tree(node: GuestMetricNode) -> Tree<String> {
+pub fn metrics_tree(node: ZoneMetricNode) -> Tree<String> {
     let mut name = node.name.to_string();
     let format = node.format();
     if let Some(value) = node.value {
@@ -2,10 +2,10 @@ use anyhow::Result;
 use krata::{
     events::EventStream,
     v1::{
-        common::{Guest, GuestMetricNode, GuestStatus},
+        common::{Zone, ZoneMetricNode, ZoneStatus},
         control::{
             control_service_client::ControlServiceClient, watch_events_reply::Event,
-            ListGuestsRequest, ReadGuestMetricsRequest,
+            ListZonesRequest, ReadZoneMetricsRequest,
         },
     },
 };
@@ -22,12 +22,12 @@ use tonic::transport::Channel;
 use crate::format::metrics_value_pretty;

 pub struct MetricState {
-    pub guest: Guest,
-    pub root: Option<GuestMetricNode>,
+    pub zone: Zone,
+    pub root: Option<ZoneMetricNode>,
 }

 pub struct MultiMetricState {
-    pub guests: Vec<MetricState>,
+    pub zones: Vec<MetricState>,
 }

 pub struct MultiMetricCollector {
@@ -72,26 +72,26 @@ impl MultiMetricCollector {

     pub async fn process(&mut self, sender: Sender<MultiMetricState>) -> Result<()> {
         let mut events = self.events.subscribe();
-        let mut guests: Vec<Guest> = self
+        let mut zones: Vec<Zone> = self
             .client
-            .list_guests(ListGuestsRequest {})
+            .list_zones(ListZonesRequest {})
             .await?
             .into_inner()
-            .guests;
+            .zones;
         loop {
             let collect = select! {
                 x = events.recv() => match x {
                     Ok(event) => {
-                        let Event::GuestChanged(changed) = event;
-                        let Some(guest) = changed.guest else {
+                        let Event::ZoneChanged(changed) = event;
+                        let Some(zone) = changed.zone else {
                             continue;
                         };
-                        let Some(ref state) = guest.state else {
+                        let Some(ref state) = zone.state else {
                             continue;
                         };
-                        guests.retain(|x| x.id != guest.id);
-                        if state.status() != GuestStatus::Destroying {
-                            guests.push(guest);
+                        zones.retain(|x| x.id != zone.id);
+                        if state.status() != ZoneStatus::Destroying {
+                            zones.push(zone);
                         }
                         false
                     },
@@ -111,19 +111,19 @@ impl MultiMetricCollector {
             }

             let mut metrics = Vec::new();
-            for guest in &guests {
-                let Some(ref state) = guest.state else {
+            for zone in &zones {
+                let Some(ref state) = zone.state else {
                     continue;
                 };

-                if state.status() != GuestStatus::Started {
+                if state.status() != ZoneStatus::Started {
                     continue;
                 }

                 let root = timeout(
                     Duration::from_secs(5),
-                    self.client.read_guest_metrics(ReadGuestMetricsRequest {
-                        guest_id: guest.id.clone(),
+                    self.client.read_zone_metrics(ReadZoneMetricsRequest {
+                        zone_id: zone.id.clone(),
                     }),
                 )
                 .await
@@ -132,16 +132,16 @@ impl MultiMetricCollector {
                 .map(|x| x.into_inner())
                 .and_then(|x| x.root);
                 metrics.push(MetricState {
-                    guest: guest.clone(),
+                    zone: zone.clone(),
                     root,
                 });
             }
-            sender.send(MultiMetricState { guests: metrics }).await?;
+            sender.send(MultiMetricState { zones: metrics }).await?;
         }
     }
 }

-pub fn lookup<'a>(node: &'a GuestMetricNode, path: &str) -> Option<&'a GuestMetricNode> {
+pub fn lookup<'a>(node: &'a ZoneMetricNode, path: &str) -> Option<&'a ZoneMetricNode> {
     let Some((what, b)) = path.split_once('/') else {
         return node.children.iter().find(|x| x.name == path);
     };
@@ -149,7 +149,7 @@ pub fn lookup<'a>(node: &'a GuestMetricNode, path: &str) -> Option<&'a GuestMetr
         return lookup(next, b);
     }

-pub fn lookup_metric_value(node: &GuestMetricNode, path: &str) -> Option<String> {
+pub fn lookup_metric_value(node: &ZoneMetricNode, path: &str) -> Option<String> {
     lookup(node, path).and_then(|x| {
         x.value
             .as_ref()
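The hunk above cuts off inside lookup, but the visible logic is a recursive walk over ZoneMetricNode children keyed by a slash-separated path. Below is a self-contained sketch of that traversal using a stand-in MetricNode type instead of the generated ZoneMetricNode; the metric names in the example tree are hypothetical, since the real names come from the zone's in-guest agent.

    // Stand-in for krata's generated ZoneMetricNode; only the fields the
    // traversal needs are modeled here.
    #[derive(Debug)]
    struct MetricNode {
        name: String,
        value: Option<String>,
        children: Vec<MetricNode>,
    }

    // Walk a slash-separated path ("memory/total" style) down the tree,
    // mirroring the shape of lookup() in the diff above.
    fn lookup<'a>(node: &'a MetricNode, path: &str) -> Option<&'a MetricNode> {
        let Some((head, rest)) = path.split_once('/') else {
            return node.children.iter().find(|x| x.name == path);
        };
        let next = node.children.iter().find(|x| x.name == head)?;
        lookup(next, rest)
    }

    fn main() {
        // Hypothetical metric tree for illustration only.
        let root = MetricNode {
            name: "".into(),
            value: None,
            children: vec![MetricNode {
                name: "memory".into(),
                value: None,
                children: vec![MetricNode {
                    name: "total".into(),
                    value: Some("1073741824".into()),
                    children: vec![],
                }],
            }],
        };
        let found = lookup(&root, "memory/total").and_then(|x| x.value.clone());
        println!("{:?}", found); // prints Some("1073741824")
    }

This is the same path convention the top dashboard relies on through lookup_metric_value when it fills the memory columns.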
@@ -17,9 +17,9 @@ circular-buffer = { workspace = true }
 clap = { workspace = true }
 env_logger = { workspace = true }
 futures = { workspace = true }
-krata = { path = "../krata", version = "^0.0.12" }
-krata-oci = { path = "../oci", version = "^0.0.12" }
-krata-runtime = { path = "../runtime", version = "^0.0.12" }
+krata = { path = "../krata", version = "^0.0.13" }
+krata-oci = { path = "../oci", version = "^0.0.13" }
+krata-runtime = { path = "../runtime", version = "^0.0.13" }
 log = { workspace = true }
 prost = { workspace = true }
 redb = { workspace = true }
@@ -13,7 +13,7 @@ use tokio::{
 };
 use uuid::Uuid;

-use crate::glt::GuestLookupTable;
+use crate::zlt::ZoneLookupTable;

 const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
 type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
@@ -24,7 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;

 #[derive(Clone)]
 pub struct DaemonConsoleHandle {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     listeners: ListenerMap,
     buffers: BufferMap,
     sender: Sender<(u32, Vec<u8>)>,
@@ -84,7 +84,7 @@ impl Drop for DaemonConsoleHandle {
 }

 pub struct DaemonConsole {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     listeners: ListenerMap,
     buffers: BufferMap,
     receiver: Receiver<(u32, Option<Vec<u8>>)>,
@@ -93,7 +93,7 @@ pub struct DaemonConsole {
 }

 impl DaemonConsole {
-    pub async fn new(glt: GuestLookupTable) -> Result<DaemonConsole> {
+    pub async fn new(glt: ZoneLookupTable) -> Result<DaemonConsole> {
         let (service, sender, receiver) =
             ChannelService::new("krata-console".to_string(), Some(0)).await?;
         let task = service.launch().await?;
@@ -7,16 +7,16 @@ use krata::{
         ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
     },
     v1::{
-        common::{Guest, GuestState, GuestStatus, OciImageFormat},
+        common::{OciImageFormat, Zone, ZoneState, ZoneStatus},
         control::{
-            control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
-            CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
-            DeviceInfo, ExecGuestReply, ExecGuestRequest, HostCpuTopologyInfo,
-            HostCpuTopologyReply, HostCpuTopologyRequest, HostPowerManagementPolicy,
-            IdentifyHostReply, IdentifyHostRequest, ListDevicesReply, ListDevicesRequest,
-            ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
-            ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
-            SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
+            control_service_server::ControlService, CreateZoneReply, CreateZoneRequest,
+            DestroyZoneReply, DestroyZoneRequest, DeviceInfo, ExecZoneReply, ExecZoneRequest,
+            HostCpuTopologyInfo, HostCpuTopologyReply, HostCpuTopologyRequest,
+            HostPowerManagementPolicy, IdentifyHostReply, IdentifyHostRequest, ListDevicesReply,
+            ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
+            ReadZoneMetricsReply, ReadZoneMetricsRequest, ResolveZoneReply, ResolveZoneRequest,
+            SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest, ZoneConsoleReply,
+            ZoneConsoleRequest,
         },
     },
 };
@@ -37,9 +37,9 @@ use tonic::{Request, Response, Status, Streaming};
 use uuid::Uuid;

 use crate::{
-    command::DaemonCommand, console::DaemonConsoleHandle, db::GuestStore,
-    devices::DaemonDeviceManager, event::DaemonEventContext, glt::GuestLookupTable,
-    idm::DaemonIdmHandle, metrics::idm_metric_to_api, oci::convert_oci_progress,
+    command::DaemonCommand, console::DaemonConsoleHandle, db::ZoneStore,
+    devices::DaemonDeviceManager, event::DaemonEventContext, idm::DaemonIdmHandle,
+    metrics::idm_metric_to_api, oci::convert_oci_progress, zlt::ZoneLookupTable,
 };

 pub struct ApiError {
@@ -62,13 +62,13 @@ impl From<ApiError> for Status {

 #[derive(Clone)]
 pub struct DaemonControlService {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     devices: DaemonDeviceManager,
     events: DaemonEventContext,
     console: DaemonConsoleHandle,
     idm: DaemonIdmHandle,
-    guests: GuestStore,
-    guest_reconciler_notify: Sender<Uuid>,
+    zones: ZoneStore,
+    zone_reconciler_notify: Sender<Uuid>,
     packer: OciPackerService,
     runtime: Runtime,
 }
@@ -76,13 +76,13 @@ pub struct DaemonControlService {
 impl DaemonControlService {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
-        glt: GuestLookupTable,
+        glt: ZoneLookupTable,
         devices: DaemonDeviceManager,
         events: DaemonEventContext,
         console: DaemonConsoleHandle,
         idm: DaemonIdmHandle,
-        guests: GuestStore,
-        guest_reconciler_notify: Sender<Uuid>,
+        zones: ZoneStore,
+        zone_reconciler_notify: Sender<Uuid>,
         packer: OciPackerService,
         runtime: Runtime,
     ) -> Self {
@@ -92,8 +92,8 @@ impl DaemonControlService {
             events,
             console,
             idm,
-            guests,
-            guest_reconciler_notify,
+            zones,
+            zone_reconciler_notify,
             packer,
             runtime,
         }
@@ -102,7 +102,7 @@ impl DaemonControlService {

 enum ConsoleDataSelect {
     Read(Option<Vec<u8>>),
-    Write(Option<Result<ConsoleDataRequest, tonic::Status>>),
+    Write(Option<Result<ZoneConsoleRequest, Status>>),
 }

 enum PullImageSelect {
@@ -112,11 +112,11 @@ enum PullImageSelect {

 #[tonic::async_trait]
 impl ControlService for DaemonControlService {
-    type ExecGuestStream =
-        Pin<Box<dyn Stream<Item = Result<ExecGuestReply, Status>> + Send + 'static>>;
+    type ExecZoneStream =
+        Pin<Box<dyn Stream<Item = Result<ExecZoneReply, Status>> + Send + 'static>>;

-    type ConsoleDataStream =
-        Pin<Box<dyn Stream<Item = Result<ConsoleDataReply, Status>> + Send + 'static>>;
+    type AttachZoneConsoleStream =
+        Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;

     type PullImageStream =
         Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
@@ -139,25 +139,25 @@ impl ControlService for DaemonControlService {
         }))
     }

-    async fn create_guest(
+    async fn create_zone(
         &self,
-        request: Request<CreateGuestRequest>,
-    ) -> Result<Response<CreateGuestReply>, Status> {
+        request: Request<CreateZoneRequest>,
+    ) -> Result<Response<CreateZoneReply>, Status> {
         let request = request.into_inner();
         let Some(spec) = request.spec else {
             return Err(ApiError {
-                message: "guest spec not provided".to_string(),
+                message: "zone spec not provided".to_string(),
             }
             .into());
         };
         let uuid = Uuid::new_v4();
-        self.guests
+        self.zones
             .update(
                 uuid,
-                Guest {
+                Zone {
                     id: uuid.to_string(),
-                    state: Some(GuestState {
-                        status: GuestStatus::Starting.into(),
+                    state: Some(ZoneState {
+                        status: ZoneStatus::Starting.into(),
                         network: None,
                         exit_info: None,
                         error_info: None,
@@ -169,21 +169,21 @@ impl ControlService for DaemonControlService {
             )
             .await
             .map_err(ApiError::from)?;
-        self.guest_reconciler_notify
+        self.zone_reconciler_notify
             .send(uuid)
             .await
             .map_err(|x| ApiError {
                 message: x.to_string(),
             })?;
-        Ok(Response::new(CreateGuestReply {
-            guest_id: uuid.to_string(),
+        Ok(Response::new(CreateZoneReply {
+            zone_id: uuid.to_string(),
         }))
     }

-    async fn exec_guest(
+    async fn exec_zone(
         &self,
-        request: Request<Streaming<ExecGuestRequest>>,
-    ) -> Result<Response<Self::ExecGuestStream>, Status> {
+        request: Request<Streaming<ExecZoneRequest>>,
+    ) -> Result<Response<Self::ExecZoneStream>, Status> {
         let mut input = request.into_inner();
         let Some(request) = input.next().await else {
             return Err(ApiError {
@@ -200,7 +200,7 @@ impl ControlService for DaemonControlService {
             .into());
         };

-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
         let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
@@ -232,7 +232,7 @@ impl ControlService for DaemonControlService {
         loop {
             select! {
                 x = input.next() => if let Some(update) = x {
-                    let update: Result<ExecGuestRequest, Status> = update.map_err(|error| ApiError {
+                    let update: Result<ExecZoneRequest, Status> = update.map_err(|error| ApiError {
                         message: error.to_string()
                     }.into());

@@ -252,7 +252,7 @@ impl ControlService for DaemonControlService {
                     let Some(IdmResponseType::ExecStream(update)) = response.response else {
                         break;
                     };
-                    let reply = ExecGuestReply {
+                    let reply = ExecZoneReply {
                         exited: update.exited,
                         error: update.error,
                         exit_code: update.exit_code,
@@ -269,80 +269,80 @@ impl ControlService for DaemonControlService {
             }
         };

-        Ok(Response::new(Box::pin(output) as Self::ExecGuestStream))
+        Ok(Response::new(Box::pin(output) as Self::ExecZoneStream))
     }

-    async fn destroy_guest(
+    async fn destroy_zone(
         &self,
-        request: Request<DestroyGuestRequest>,
-    ) -> Result<Response<DestroyGuestReply>, Status> {
+        request: Request<DestroyZoneRequest>,
+    ) -> Result<Response<DestroyZoneReply>, Status> {
         let request = request.into_inner();
-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
-        let Some(mut guest) = self.guests.read(uuid).await.map_err(ApiError::from)? else {
+        let Some(mut zone) = self.zones.read(uuid).await.map_err(ApiError::from)? else {
             return Err(ApiError {
-                message: "guest not found".to_string(),
+                message: "zone not found".to_string(),
             }
            .into());
        };

-        guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
+        zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());

-        if guest.state.as_ref().unwrap().status() == GuestStatus::Destroyed {
+        if zone.state.as_ref().unwrap().status() == ZoneStatus::Destroyed {
             return Err(ApiError {
-                message: "guest already destroyed".to_string(),
+                message: "zone already destroyed".to_string(),
             }
             .into());
         }

-        guest.state.as_mut().unwrap().status = GuestStatus::Destroying.into();
-        self.guests
-            .update(uuid, guest)
+        zone.state.as_mut().unwrap().status = ZoneStatus::Destroying.into();
+        self.zones
+            .update(uuid, zone)
             .await
             .map_err(ApiError::from)?;
-        self.guest_reconciler_notify
+        self.zone_reconciler_notify
             .send(uuid)
             .await
             .map_err(|x| ApiError {
                 message: x.to_string(),
             })?;
-        Ok(Response::new(DestroyGuestReply {}))
+        Ok(Response::new(DestroyZoneReply {}))
     }

-    async fn list_guests(
+    async fn list_zones(
         &self,
-        request: Request<ListGuestsRequest>,
-    ) -> Result<Response<ListGuestsReply>, Status> {
+        request: Request<ListZonesRequest>,
+    ) -> Result<Response<ListZonesReply>, Status> {
         let _ = request.into_inner();
-        let guests = self.guests.list().await.map_err(ApiError::from)?;
-        let guests = guests.into_values().collect::<Vec<Guest>>();
-        Ok(Response::new(ListGuestsReply { guests }))
+        let zones = self.zones.list().await.map_err(ApiError::from)?;
+        let zones = zones.into_values().collect::<Vec<Zone>>();
+        Ok(Response::new(ListZonesReply { zones }))
     }

-    async fn resolve_guest(
+    async fn resolve_zone(
         &self,
-        request: Request<ResolveGuestRequest>,
-    ) -> Result<Response<ResolveGuestReply>, Status> {
+        request: Request<ResolveZoneRequest>,
+    ) -> Result<Response<ResolveZoneReply>, Status> {
         let request = request.into_inner();
-        let guests = self.guests.list().await.map_err(ApiError::from)?;
-        let guests = guests
+        let zones = self.zones.list().await.map_err(ApiError::from)?;
+        let zones = zones
             .into_values()
             .filter(|x| {
                 let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
                 (!request.name.is_empty() && comparison_spec.name == request.name)
                     || x.id == request.name
             })
-            .collect::<Vec<Guest>>();
-        Ok(Response::new(ResolveGuestReply {
-            guest: guests.first().cloned(),
+            .collect::<Vec<Zone>>();
+        Ok(Response::new(ResolveZoneReply {
+            zone: zones.first().cloned(),
         }))
     }

-    async fn console_data(
+    async fn attach_zone_console(
         &self,
-        request: Request<Streaming<ConsoleDataRequest>>,
-    ) -> Result<Response<Self::ConsoleDataStream>, Status> {
+        request: Request<Streaming<ZoneConsoleRequest>>,
+    ) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
         let mut input = request.into_inner();
         let Some(request) = input.next().await else {
             return Err(ApiError {
@@ -351,7 +351,7 @@ impl ControlService for DaemonControlService {
             .into());
         };
         let request = request?;
-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
         let (sender, mut receiver) = channel(100);
@@ -364,7 +364,7 @@ impl ControlService for DaemonControlService {
         })?;

         let output = try_stream! {
-            yield ConsoleDataReply { data: console.initial.clone(), };
+            yield ZoneConsoleReply { data: console.initial.clone(), };
             loop {
                 let what = select! {
                     x = receiver.recv() => ConsoleDataSelect::Read(x),
@@ -373,7 +373,7 @@ impl ControlService for DaemonControlService {

                 match what {
                     ConsoleDataSelect::Read(Some(data)) => {
-                        yield ConsoleDataReply { data, };
+                        yield ZoneConsoleReply { data, };
                     },

                     ConsoleDataSelect::Read(None) => {
@@ -396,15 +396,17 @@ impl ControlService for DaemonControlService {
             }
         };

-        Ok(Response::new(Box::pin(output) as Self::ConsoleDataStream))
+        Ok(Response::new(
+            Box::pin(output) as Self::AttachZoneConsoleStream
+        ))
     }

-    async fn read_guest_metrics(
+    async fn read_zone_metrics(
         &self,
-        request: Request<ReadGuestMetricsRequest>,
-    ) -> Result<Response<ReadGuestMetricsReply>, Status> {
+        request: Request<ReadZoneMetricsRequest>,
+    ) -> Result<Response<ReadZoneMetricsReply>, Status> {
         let request = request.into_inner();
-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
         let client = self.idm.client(uuid).await.map_err(|error| ApiError {
@@ -420,7 +422,7 @@ impl ControlService for DaemonControlService {
             message: error.to_string(),
         })?;

-        let mut reply = ReadGuestMetricsReply::default();
+        let mut reply = ReadZoneMetricsReply::default();
         if let Some(IdmResponseType::Metrics(metrics)) = response.response {
             reply.root = metrics.root.map(idm_metric_to_api);
         }
@ -1,66 +1,66 @@
|
|||||||
use std::{collections::HashMap, path::Path, sync::Arc};
|
use std::{collections::HashMap, path::Path, sync::Arc};
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use krata::v1::common::Guest;
|
use krata::v1::common::Zone;
|
||||||
use log::error;
|
use log::error;
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use redb::{Database, ReadableTable, TableDefinition};
|
use redb::{Database, ReadableTable, TableDefinition};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
const GUESTS: TableDefinition<u128, &[u8]> = TableDefinition::new("guests");
|
const ZONES: TableDefinition<u128, &[u8]> = TableDefinition::new("zones");
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct GuestStore {
|
pub struct ZoneStore {
|
||||||
database: Arc<Database>,
|
database: Arc<Database>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestStore {
|
impl ZoneStore {
|
||||||
pub fn open(path: &Path) -> Result<Self> {
|
pub fn open(path: &Path) -> Result<Self> {
|
||||||
let database = Database::create(path)?;
|
let database = Database::create(path)?;
|
||||||
let write = database.begin_write()?;
|
let write = database.begin_write()?;
|
||||||
let _ = write.open_table(GUESTS);
|
let _ = write.open_table(ZONES);
|
||||||
write.commit()?;
|
write.commit()?;
|
||||||
Ok(GuestStore {
|
Ok(ZoneStore {
|
||||||
database: Arc::new(database),
|
database: Arc::new(database),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn read(&self, id: Uuid) -> Result<Option<Guest>> {
|
pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
|
||||||
let read = self.database.begin_read()?;
|
let read = self.database.begin_read()?;
|
||||||
let table = read.open_table(GUESTS)?;
|
let table = read.open_table(ZONES)?;
|
||||||
let Some(entry) = table.get(id.to_u128_le())? else {
|
let Some(entry) = table.get(id.to_u128_le())? else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
};
|
};
|
||||||
let bytes = entry.value();
|
let bytes = entry.value();
|
||||||
Ok(Some(Guest::decode(bytes)?))
|
Ok(Some(Zone::decode(bytes)?))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn list(&self) -> Result<HashMap<Uuid, Guest>> {
|
pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
|
||||||
let mut guests: HashMap<Uuid, Guest> = HashMap::new();
|
let mut zones: HashMap<Uuid, Zone> = HashMap::new();
|
||||||
let read = self.database.begin_read()?;
|
let read = self.database.begin_read()?;
|
||||||
let table = read.open_table(GUESTS)?;
|
let table = read.open_table(ZONES)?;
|
||||||
for result in table.iter()? {
|
for result in table.iter()? {
|
||||||
let (key, value) = result?;
|
let (key, value) = result?;
|
||||||
let uuid = Uuid::from_u128_le(key.value());
|
let uuid = Uuid::from_u128_le(key.value());
|
||||||
let state = match Guest::decode(value.value()) {
|
let state = match Zone::decode(value.value()) {
|
||||||
Ok(state) => state,
|
Ok(state) => state,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
error!(
|
error!(
|
||||||
"found invalid guest state in database for uuid {}: {}",
|
"found invalid zone state in database for uuid {}: {}",
|
||||||
uuid, error
|
uuid, error
|
||||||
);
|
);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
guests.insert(uuid, state);
|
zones.insert(uuid, state);
|
||||||
}
|
}
|
||||||
Ok(guests)
|
Ok(zones)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn update(&self, id: Uuid, entry: Guest) -> Result<()> {
|
pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
|
||||||
let write = self.database.begin_write()?;
|
let write = self.database.begin_write()?;
|
||||||
{
|
{
|
||||||
let mut table = write.open_table(GUESTS)?;
|
let mut table = write.open_table(ZONES)?;
|
||||||
let bytes = entry.encode_to_vec();
|
let bytes = entry.encode_to_vec();
|
||||||
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
||||||
}
|
}
|
||||||
@ -71,7 +71,7 @@ impl GuestStore {
|
|||||||
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
||||||
let write = self.database.begin_write()?;
|
let write = self.database.begin_write()?;
|
||||||
{
|
{
|
||||||
let mut table = write.open_table(GUESTS)?;
|
let mut table = write.open_table(ZONES)?;
|
||||||
table.remove(id.to_u128_le())?;
|
table.remove(id.to_u128_le())?;
|
||||||
}
|
}
|
||||||
write.commit()?;
|
write.commit()?;
|
||||||
|
@ -31,7 +31,7 @@ impl DaemonDeviceManager {
|
|||||||
let mut devices = self.devices.write().await;
|
let mut devices = self.devices.write().await;
|
||||||
let Some(state) = devices.get_mut(device) else {
|
let Some(state) = devices.get_mut(device) else {
|
||||||
return Err(anyhow!(
|
return Err(anyhow!(
|
||||||
"unable to claim unknown device '{}' for guest {}",
|
"unable to claim unknown device '{}' for zone {}",
|
||||||
device,
|
device,
|
||||||
uuid
|
uuid
|
||||||
));
|
));
|
||||||
@ -39,7 +39,7 @@ impl DaemonDeviceManager {
|
|||||||
|
|
||||||
if let Some(owner) = state.owner {
|
if let Some(owner) = state.owner {
|
||||||
return Err(anyhow!(
|
return Err(anyhow!(
|
||||||
"unable to claim device '{}' for guest {}: already claimed by {}",
|
"unable to claim device '{}' for zone {}: already claimed by {}",
|
||||||
device,
|
device,
|
||||||
uuid,
|
uuid,
|
||||||
owner
|
owner
|
||||||
@ -92,7 +92,7 @@ impl DaemonDeviceManager {
|
|||||||
|
|
||||||
for (name, uuid) in &claims {
|
for (name, uuid) in &claims {
|
||||||
if !devices.contains_key(name) {
|
if !devices.contains_key(name) {
|
||||||
warn!("unknown device '{}' assigned to guest {}", name, uuid);
|
warn!("unknown device '{}' assigned to zone {}", name, uuid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4,10 +4,12 @@ use std::{
|
|||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use crate::{db::ZoneStore, idm::DaemonIdmHandle};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
use krata::v1::common::ZoneExitInfo;
|
||||||
use krata::{
|
use krata::{
|
||||||
idm::{internal::event::Event as EventType, internal::Event},
|
idm::{internal::event::Event as EventType, internal::Event},
|
||||||
v1::common::{GuestExitInfo, GuestState, GuestStatus},
|
v1::common::{ZoneState, ZoneStatus},
|
||||||
};
|
};
|
||||||
use log::{error, warn};
|
use log::{error, warn};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
@ -21,8 +23,6 @@ use tokio::{
|
|||||||
};
|
};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::{db::GuestStore, idm::DaemonIdmHandle};
|
|
||||||
|
|
||||||
pub type DaemonEvent = krata::v1::control::watch_events_reply::Event;
|
pub type DaemonEvent = krata::v1::control::watch_events_reply::Event;
|
||||||
|
|
||||||
const EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
|
const EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
|
||||||
@ -45,8 +45,8 @@ impl DaemonEventContext {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub struct DaemonEventGenerator {
|
pub struct DaemonEventGenerator {
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
feed: broadcast::Receiver<DaemonEvent>,
|
feed: broadcast::Receiver<DaemonEvent>,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
|
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
|
||||||
@ -57,15 +57,15 @@ pub struct DaemonEventGenerator {
|
|||||||
|
|
||||||
impl DaemonEventGenerator {
|
impl DaemonEventGenerator {
|
||||||
pub async fn new(
|
pub async fn new(
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
) -> Result<(DaemonEventContext, DaemonEventGenerator)> {
|
) -> Result<(DaemonEventContext, DaemonEventGenerator)> {
|
||||||
let (sender, _) = broadcast::channel(EVENT_CHANNEL_QUEUE_LEN);
|
let (sender, _) = broadcast::channel(EVENT_CHANNEL_QUEUE_LEN);
|
||||||
let (idm_sender, idm_receiver) = channel(IDM_EVENT_CHANNEL_QUEUE_LEN);
|
let (idm_sender, idm_receiver) = channel(IDM_EVENT_CHANNEL_QUEUE_LEN);
|
||||||
let generator = DaemonEventGenerator {
|
let generator = DaemonEventGenerator {
|
||||||
guests,
|
zones,
|
||||||
guest_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
feed: sender.subscribe(),
|
feed: sender.subscribe(),
|
||||||
idm,
|
idm,
|
||||||
idms: HashMap::new(),
|
idms: HashMap::new(),
|
||||||
@ -78,20 +78,20 @@ impl DaemonEventGenerator {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_feed_event(&mut self, event: &DaemonEvent) -> Result<()> {
|
async fn handle_feed_event(&mut self, event: &DaemonEvent) -> Result<()> {
|
||||||
let DaemonEvent::GuestChanged(changed) = event;
|
let DaemonEvent::ZoneChanged(changed) = event;
|
||||||
let Some(ref guest) = changed.guest else {
|
let Some(ref zone) = changed.zone else {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(ref state) = guest.state else {
|
let Some(ref state) = zone.state else {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
let status = state.status();
|
let status = state.status();
|
||||||
let id = Uuid::from_str(&guest.id)?;
|
let id = Uuid::from_str(&zone.id)?;
|
||||||
let domid = state.domid;
|
let domid = state.domid;
|
||||||
match status {
|
match status {
|
||||||
GuestStatus::Started => {
|
ZoneStatus::Started => {
|
||||||
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
||||||
let client = self.idm.client_by_domid(domid).await?;
|
let client = self.idm.client_by_domid(domid).await?;
|
||||||
let mut receiver = client.subscribe().await?;
|
let mut receiver = client.subscribe().await?;
|
||||||
@ -111,7 +111,7 @@ impl DaemonEventGenerator {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
GuestStatus::Destroyed => {
|
ZoneStatus::Destroyed => {
|
||||||
if let Some((_, handle)) = self.idms.remove(&domid) {
|
if let Some((_, handle)) = self.idms.remove(&domid) {
|
||||||
handle.abort();
|
handle.abort();
|
||||||
}
|
}
|
||||||
@ -130,18 +130,18 @@ impl DaemonEventGenerator {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
||||||
if let Some(mut guest) = self.guests.read(id).await? {
|
if let Some(mut zone) = self.zones.read(id).await? {
|
||||||
guest.state = Some(GuestState {
|
zone.state = Some(ZoneState {
|
||||||
status: GuestStatus::Exited.into(),
|
status: ZoneStatus::Exited.into(),
|
||||||
network: guest.state.clone().unwrap_or_default().network,
|
network: zone.state.clone().unwrap_or_default().network,
|
||||||
exit_info: Some(GuestExitInfo { code }),
|
exit_info: Some(ZoneExitInfo { code }),
|
||||||
error_info: None,
|
error_info: None,
|
||||||
host: guest.state.clone().map(|x| x.host).unwrap_or_default(),
|
host: zone.state.clone().map(|x| x.host).unwrap_or_default(),
|
||||||
domid: guest.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
domid: zone.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
||||||
});
|
});
|
||||||
|
|
||||||
self.guests.update(id, guest).await?;
|
self.zones.update(id, zone).await?;
|
||||||
self.guest_reconciler_notify.send(id).await?;
|
self.zone_reconciler_notify.send(id).await?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -24,14 +24,14 @@ use tokio::{
|
|||||||
};
|
};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::glt::GuestLookupTable;
|
use crate::zlt::ZoneLookupTable;
|
||||||
|
|
||||||
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
|
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
|
||||||
type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
|
type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DaemonIdmHandle {
|
pub struct DaemonIdmHandle {
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
clients: ClientMap,
|
clients: ClientMap,
|
||||||
feeds: BackendFeedMap,
|
feeds: BackendFeedMap,
|
||||||
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
||||||
@ -72,7 +72,7 @@ pub struct DaemonIdmSnoopPacket {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub struct DaemonIdm {
|
pub struct DaemonIdm {
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
clients: ClientMap,
|
clients: ClientMap,
|
||||||
feeds: BackendFeedMap,
|
feeds: BackendFeedMap,
|
||||||
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
||||||
@ -84,7 +84,7 @@ pub struct DaemonIdm {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DaemonIdm {
|
impl DaemonIdm {
|
||||||
pub async fn new(glt: GuestLookupTable) -> Result<DaemonIdm> {
|
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonIdm> {
|
||||||
let (service, tx_raw_sender, rx_receiver) =
|
let (service, tx_raw_sender, rx_receiver) =
|
||||||
ChannelService::new("krata-channel".to_string(), None).await?;
|
ChannelService::new("krata-channel".to_string(), None).await?;
|
||||||
let (tx_sender, tx_receiver) = channel(100);
|
let (tx_sender, tx_receiver) = channel(100);
|
||||||
|
@ -4,16 +4,15 @@ use anyhow::{anyhow, Result};
|
|||||||
use config::DaemonConfig;
|
use config::DaemonConfig;
|
||||||
use console::{DaemonConsole, DaemonConsoleHandle};
|
use console::{DaemonConsole, DaemonConsoleHandle};
|
||||||
use control::DaemonControlService;
|
use control::DaemonControlService;
|
||||||
use db::GuestStore;
|
use db::ZoneStore;
|
||||||
use devices::DaemonDeviceManager;
|
use devices::DaemonDeviceManager;
|
||||||
use event::{DaemonEventContext, DaemonEventGenerator};
|
use event::{DaemonEventContext, DaemonEventGenerator};
|
||||||
use glt::GuestLookupTable;
|
|
||||||
use idm::{DaemonIdm, DaemonIdmHandle};
|
use idm::{DaemonIdm, DaemonIdmHandle};
|
||||||
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
||||||
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
||||||
use kratart::Runtime;
|
use kratart::Runtime;
|
||||||
use log::info;
|
use log::info;
|
||||||
use reconcile::guest::GuestReconciler;
|
use reconcile::zone::ZoneReconciler;
|
||||||
use tokio::{
|
use tokio::{
|
||||||
fs,
|
fs,
|
||||||
net::UnixListener,
|
net::UnixListener,
|
||||||
@ -23,6 +22,7 @@ use tokio::{
|
|||||||
use tokio_stream::wrappers::UnixListenerStream;
|
use tokio_stream::wrappers::UnixListenerStream;
|
||||||
use tonic::transport::{Identity, Server, ServerTlsConfig};
|
use tonic::transport::{Identity, Server, ServerTlsConfig};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
use zlt::ZoneLookupTable;
|
||||||
|
|
||||||
pub mod command;
|
pub mod command;
|
||||||
pub mod config;
|
pub mod config;
|
||||||
@ -31,21 +31,21 @@ pub mod control;
|
|||||||
pub mod db;
|
pub mod db;
|
||||||
pub mod devices;
|
pub mod devices;
|
||||||
pub mod event;
|
pub mod event;
|
||||||
pub mod glt;
|
|
||||||
pub mod idm;
|
pub mod idm;
|
||||||
pub mod metrics;
|
pub mod metrics;
|
||||||
pub mod oci;
|
pub mod oci;
|
||||||
pub mod reconcile;
|
pub mod reconcile;
|
||||||
|
pub mod zlt;
|
||||||
|
|
||||||
pub struct Daemon {
|
pub struct Daemon {
|
||||||
store: String,
|
store: String,
|
||||||
_config: Arc<DaemonConfig>,
|
_config: Arc<DaemonConfig>,
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
guest_reconciler_task: JoinHandle<()>,
|
zone_reconciler_task: JoinHandle<()>,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
generator_task: JoinHandle<()>,
|
generator_task: JoinHandle<()>,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
console: DaemonConsoleHandle,
|
console: DaemonConsoleHandle,
|
||||||
@ -53,7 +53,7 @@ pub struct Daemon {
|
|||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
}
|
}
|
||||||
|
|
||||||
const GUEST_RECONCILER_QUEUE_LEN: usize = 1000;
|
const ZONE_RECONCILER_QUEUE_LEN: usize = 1000;
|
||||||
|
|
||||||
impl Daemon {
|
impl Daemon {
|
||||||
pub async fn new(store: String) -> Result<Self> {
|
pub async fn new(store: String) -> Result<Self> {
|
||||||
@ -89,40 +89,40 @@ impl Daemon {
|
|||||||
generated
|
generated
|
||||||
};
|
};
|
||||||
|
|
||||||
let initrd_path = detect_guest_path(&store, "initrd")?;
|
let initrd_path = detect_zone_path(&store, "initrd")?;
|
||||||
let kernel_path = detect_guest_path(&store, "kernel")?;
|
let kernel_path = detect_zone_path(&store, "kernel")?;
|
||||||
let addons_path = detect_guest_path(&store, "addons.squashfs")?;
|
let addons_path = detect_zone_path(&store, "addons.squashfs")?;
|
||||||
|
|
||||||
let seed = config.oci.seed.clone().map(PathBuf::from);
|
let seed = config.oci.seed.clone().map(PathBuf::from);
|
||||||
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
|
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
|
||||||
let runtime = Runtime::new(host_uuid).await?;
|
let runtime = Runtime::new(host_uuid).await?;
|
||||||
let glt = GuestLookupTable::new(0, host_uuid);
|
let glt = ZoneLookupTable::new(0, host_uuid);
|
||||||
let guests_db_path = format!("{}/guests.db", store);
|
let zones_db_path = format!("{}/zones.db", store);
|
||||||
let guests = GuestStore::open(&PathBuf::from(guests_db_path))?;
|
let zones = ZoneStore::open(&PathBuf::from(zones_db_path))?;
|
||||||
let (guest_reconciler_notify, guest_reconciler_receiver) =
|
let (zone_reconciler_notify, zone_reconciler_receiver) =
|
||||||
channel::<Uuid>(GUEST_RECONCILER_QUEUE_LEN);
|
channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
|
||||||
let idm = DaemonIdm::new(glt.clone()).await?;
|
let idm = DaemonIdm::new(glt.clone()).await?;
|
||||||
let idm = idm.launch().await?;
|
let idm = idm.launch().await?;
|
||||||
let console = DaemonConsole::new(glt.clone()).await?;
|
let console = DaemonConsole::new(glt.clone()).await?;
|
||||||
let console = console.launch().await?;
|
let console = console.launch().await?;
|
||||||
let (events, generator) =
|
let (events, generator) =
|
||||||
DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
|
DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
|
||||||
.await?;
|
.await?;
|
||||||
let runtime_for_reconciler = runtime.dupe().await?;
|
let runtime_for_reconciler = runtime.dupe().await?;
|
||||||
let guest_reconciler = GuestReconciler::new(
|
let zone_reconciler = ZoneReconciler::new(
|
||||||
devices.clone(),
|
devices.clone(),
|
||||||
glt.clone(),
|
glt.clone(),
|
||||||
guests.clone(),
|
zones.clone(),
|
||||||
events.clone(),
|
events.clone(),
|
||||||
runtime_for_reconciler,
|
runtime_for_reconciler,
|
||||||
packer.clone(),
|
packer.clone(),
|
||||||
guest_reconciler_notify.clone(),
|
zone_reconciler_notify.clone(),
|
||||||
kernel_path,
|
kernel_path,
|
||||||
initrd_path,
|
initrd_path,
|
||||||
addons_path,
|
addons_path,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
|
let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
|
||||||
let generator_task = generator.launch().await?;
|
let generator_task = generator.launch().await?;
|
||||||
|
|
||||||
// TODO: Create a way of abstracting early init tasks in kratad.
|
// TODO: Create a way of abstracting early init tasks in kratad.
|
||||||
@ -139,10 +139,10 @@ impl Daemon {
|
|||||||
_config: config,
|
_config: config,
|
||||||
glt,
|
glt,
|
||||||
devices,
|
devices,
|
||||||
guests,
|
zones,
|
||||||
events,
|
events,
|
||||||
guest_reconciler_task,
|
zone_reconciler_task,
|
||||||
guest_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
generator_task,
|
generator_task,
|
||||||
idm,
|
idm,
|
||||||
console,
|
console,
|
||||||
@ -158,8 +158,8 @@ impl Daemon {
|
|||||||
self.events.clone(),
|
self.events.clone(),
|
||||||
self.console.clone(),
|
self.console.clone(),
|
||||||
self.idm.clone(),
|
self.idm.clone(),
|
||||||
self.guests.clone(),
|
self.zones.clone(),
|
||||||
self.guest_reconciler_notify.clone(),
|
self.zone_reconciler_notify.clone(),
|
||||||
self.packer.clone(),
|
self.packer.clone(),
|
||||||
self.runtime.clone(),
|
self.runtime.clone(),
|
||||||
);
|
);
|
||||||
@ -214,20 +214,20 @@ impl Daemon {
|
|||||||
|
|
||||||
impl Drop for Daemon {
|
impl Drop for Daemon {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
self.guest_reconciler_task.abort();
|
self.zone_reconciler_task.abort();
|
||||||
self.generator_task.abort();
|
self.generator_task.abort();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn detect_guest_path(store: &str, name: &str) -> Result<PathBuf> {
|
fn detect_zone_path(store: &str, name: &str) -> Result<PathBuf> {
|
||||||
let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
|
let mut path = PathBuf::from(format!("{}/zone/{}", store, name));
|
||||||
if path.is_file() {
|
if path.is_file() {
|
||||||
return Ok(path);
|
return Ok(path);
|
||||||
}
|
}
|
||||||
|
|
||||||
path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
|
path = PathBuf::from(format!("/usr/share/krata/zone/{}", name));
|
||||||
if path.is_file() {
|
if path.is_file() {
|
||||||
return Ok(path);
|
return Ok(path);
|
||||||
}
|
}
|
||||||
Err(anyhow!("unable to find required guest file: {}", name))
|
Err(anyhow!("unable to find required zone file: {}", name))
|
||||||
}
|
}
|
||||||
|
@ -1,20 +1,20 @@
|
|||||||
use krata::{
|
use krata::{
|
||||||
idm::internal::{MetricFormat, MetricNode},
|
idm::internal::{MetricFormat, MetricNode},
|
||||||
v1::common::{GuestMetricFormat, GuestMetricNode},
|
v1::common::{ZoneMetricFormat, ZoneMetricNode},
|
||||||
};
|
};
|
||||||
|
|
||||||
fn idm_metric_format_to_api(format: MetricFormat) -> GuestMetricFormat {
|
fn idm_metric_format_to_api(format: MetricFormat) -> ZoneMetricFormat {
|
||||||
match format {
|
match format {
|
||||||
MetricFormat::Unknown => GuestMetricFormat::Unknown,
|
MetricFormat::Unknown => ZoneMetricFormat::Unknown,
|
||||||
MetricFormat::Bytes => GuestMetricFormat::Bytes,
|
MetricFormat::Bytes => ZoneMetricFormat::Bytes,
|
||||||
MetricFormat::Integer => GuestMetricFormat::Integer,
|
MetricFormat::Integer => ZoneMetricFormat::Integer,
|
||||||
MetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
|
MetricFormat::DurationSeconds => ZoneMetricFormat::DurationSeconds,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn idm_metric_to_api(node: MetricNode) -> GuestMetricNode {
|
pub fn idm_metric_to_api(node: MetricNode) -> ZoneMetricNode {
|
||||||
let format = node.format();
|
let format = node.format();
|
||||||
GuestMetricNode {
|
ZoneMetricNode {
|
||||||
name: node.name,
|
name: node.name,
|
||||||
value: node.value,
|
value: node.value,
|
||||||
format: idm_metric_format_to_api(format).into(),
|
format: idm_metric_format_to_api(format).into(),
|
||||||
|
@ -1 +1 @@
|
|||||||
pub mod guest;
|
pub mod zone;
|
||||||
|
@ -7,11 +7,11 @@ use std::{
|
|||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use krata::v1::{
|
use krata::v1::{
|
||||||
common::{Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState, GuestState, GuestStatus},
|
common::{Zone, ZoneErrorInfo, ZoneExitInfo, ZoneNetworkState, ZoneState, ZoneStatus},
|
||||||
control::GuestChangedEvent,
|
control::ZoneChangedEvent,
|
||||||
};
|
};
|
||||||
use krataoci::packer::service::OciPackerService;
|
use krataoci::packer::service::OciPackerService;
|
||||||
use kratart::{GuestInfo, Runtime};
|
use kratart::{Runtime, ZoneInfo};
|
||||||
use log::{error, info, trace, warn};
|
use log::{error, info, trace, warn};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
select,
|
select,
|
||||||
@ -25,69 +25,69 @@ use tokio::{
|
|||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
db::GuestStore,
|
db::ZoneStore,
|
||||||
devices::DaemonDeviceManager,
|
devices::DaemonDeviceManager,
|
||||||
event::{DaemonEvent, DaemonEventContext},
|
event::{DaemonEvent, DaemonEventContext},
|
||||||
glt::GuestLookupTable,
|
zlt::ZoneLookupTable,
|
||||||
};
|
};
|
||||||
|
|
||||||
use self::start::GuestStarter;
|
use self::start::ZoneStarter;
|
||||||
|
|
||||||
mod start;
|
mod start;
|
||||||
|
|
||||||
const PARALLEL_LIMIT: u32 = 5;
|
const PARALLEL_LIMIT: u32 = 5;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
enum GuestReconcilerResult {
|
enum ZoneReconcilerResult {
|
||||||
Unchanged,
|
Unchanged,
|
||||||
Changed { rerun: bool },
|
Changed { rerun: bool },
|
||||||
}
|
}
|
||||||
|
|
||||||
struct GuestReconcilerEntry {
|
struct ZoneReconcilerEntry {
|
||||||
task: JoinHandle<()>,
|
task: JoinHandle<()>,
|
||||||
sender: Sender<()>,
|
sender: Sender<()>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for GuestReconcilerEntry {
|
impl Drop for ZoneReconcilerEntry {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
self.task.abort();
|
self.task.abort();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct GuestReconciler {
|
pub struct ZoneReconciler {
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
glt: GuestLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
packer: OciPackerService,
|
packer: OciPackerService,
|
||||||
kernel_path: PathBuf,
|
kernel_path: PathBuf,
|
||||||
initrd_path: PathBuf,
|
initrd_path: PathBuf,
|
||||||
addons_path: PathBuf,
|
addons_path: PathBuf,
|
||||||
tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
|
tasks: Arc<Mutex<HashMap<Uuid, ZoneReconcilerEntry>>>,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
reconcile_lock: Arc<RwLock<()>>,
|
zone_reconcile_lock: Arc<RwLock<()>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestReconciler {
|
impl ZoneReconciler {
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn new(
|
pub fn new(
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
glt: GuestLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
packer: OciPackerService,
|
packer: OciPackerService,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
kernel_path: PathBuf,
|
kernel_path: PathBuf,
|
||||||
initrd_path: PathBuf,
|
initrd_path: PathBuf,
|
||||||
modules_path: PathBuf,
|
modules_path: PathBuf,
|
||||||
) -> Result<Self> {
|
) -> Result<Self> {
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
devices,
|
devices,
|
||||||
glt,
|
zlt,
|
||||||
guests,
|
zones,
|
||||||
events,
|
events,
|
||||||
runtime,
|
runtime,
|
||||||
packer,
|
packer,
|
||||||
@ -95,8 +95,8 @@ impl GuestReconciler {
|
|||||||
initrd_path,
|
initrd_path,
|
||||||
addons_path: modules_path,
|
addons_path: modules_path,
|
||||||
tasks: Arc::new(Mutex::new(HashMap::new())),
|
tasks: Arc::new(Mutex::new(HashMap::new())),
|
||||||
guest_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,13 +115,13 @@ impl GuestReconciler {
|
|||||||
|
|
||||||
Some(uuid) => {
|
Some(uuid) => {
|
||||||
if let Err(error) = self.launch_task_if_needed(uuid).await {
|
if let Err(error) = self.launch_task_if_needed(uuid).await {
|
||||||
error!("failed to start guest reconciler task {}: {}", uuid, error);
|
error!("failed to start zone reconciler task {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
|
|
||||||
let map = self.tasks.lock().await;
|
let map = self.tasks.lock().await;
|
||||||
if let Some(entry) = map.get(&uuid) {
|
if let Some(entry) = map.get(&uuid) {
|
||||||
if let Err(error) = entry.sender.send(()).await {
|
if let Err(error) = entry.sender.send(()).await {
|
||||||
error!("failed to notify guest reconciler task {}: {}", uuid, error);
|
error!("failed to notify zone reconciler task {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -138,52 +138,52 @@ impl GuestReconciler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
|
pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
|
||||||
let _permit = self.reconcile_lock.write().await;
|
let _permit = self.zone_reconcile_lock.write().await;
|
||||||
trace!("reconciling runtime");
|
trace!("reconciling runtime");
|
||||||
let runtime_guests = self.runtime.list().await?;
|
let runtime_zones = self.runtime.list().await?;
|
||||||
let stored_guests = self.guests.list().await?;
|
let stored_zones = self.zones.list().await?;
|
||||||
|
|
||||||
let non_existent_guests = runtime_guests
|
let non_existent_zones = runtime_zones
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|x| !stored_guests.iter().any(|g| *g.0 == x.uuid))
|
.filter(|x| !stored_zones.iter().any(|g| *g.0 == x.uuid))
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
for guest in non_existent_guests {
|
for zone in non_existent_zones {
|
||||||
warn!("destroying unknown runtime guest {}", guest.uuid);
|
warn!("destroying unknown runtime zone {}", zone.uuid);
|
||||||
if let Err(error) = self.runtime.destroy(guest.uuid).await {
|
if let Err(error) = self.runtime.destroy(zone.uuid).await {
|
||||||
error!(
|
error!(
|
||||||
"failed to destroy unknown runtime guest {}: {}",
|
"failed to destroy unknown runtime zone {}: {}",
|
||||||
guest.uuid, error
|
zone.uuid, error
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
self.guests.remove(guest.uuid).await?;
|
self.zones.remove(zone.uuid).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut device_claims = HashMap::new();
|
let mut device_claims = HashMap::new();
|
||||||
|
|
||||||
for (uuid, mut stored_guest) in stored_guests {
|
for (uuid, mut stored_zone) in stored_zones {
|
||||||
let previous_guest = stored_guest.clone();
|
let previous_zone = stored_zone.clone();
|
||||||
let runtime_guest = runtime_guests.iter().find(|x| x.uuid == uuid);
|
let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
|
||||||
match runtime_guest {
|
match runtime_zone {
|
||||||
None => {
|
None => {
|
||||||
let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
|
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
||||||
if state.status() == GuestStatus::Started {
|
if state.status() == ZoneStatus::Started {
|
||||||
state.status = GuestStatus::Starting.into();
|
state.status = ZoneStatus::Starting.into();
|
||||||
}
|
}
|
||||||
stored_guest.state = Some(state);
|
stored_zone.state = Some(state);
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(runtime) => {
|
Some(runtime) => {
|
||||||
self.glt.associate(uuid, runtime.domid).await;
|
self.zlt.associate(uuid, runtime.domid).await;
|
||||||
let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
|
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
||||||
if let Some(code) = runtime.state.exit_code {
|
if let Some(code) = runtime.state.exit_code {
|
||||||
state.status = GuestStatus::Exited.into();
|
state.status = ZoneStatus::Exited.into();
|
||||||
state.exit_info = Some(GuestExitInfo { code });
|
state.exit_info = Some(ZoneExitInfo { code });
|
||||||
} else {
|
} else {
|
||||||
state.status = GuestStatus::Started.into();
|
state.status = ZoneStatus::Started.into();
|
||||||
}
|
}
|
||||||
|
|
||||||
for device in &stored_guest
|
for device in &stored_zone
|
||||||
.spec
|
.spec
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.cloned()
|
.cloned()
|
||||||
@ -193,16 +193,16 @@ impl GuestReconciler {
|
|||||||
device_claims.insert(device.name.clone(), uuid);
|
device_claims.insert(device.name.clone(), uuid);
|
||||||
}
|
}
|
||||||
|
|
||||||
state.network = Some(guestinfo_to_networkstate(runtime));
|
state.network = Some(zoneinfo_to_networkstate(runtime));
|
||||||
stored_guest.state = Some(state);
|
stored_zone.state = Some(state);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let changed = stored_guest != previous_guest;
|
let changed = stored_zone != previous_zone;
|
||||||
|
|
||||||
if changed || initial {
|
if changed || initial {
|
||||||
self.guests.update(uuid, stored_guest).await?;
|
self.zones.update(uuid, stored_zone).await?;
|
||||||
let _ = self.guest_reconciler_notify.try_send(uuid);
|
let _ = self.zone_reconciler_notify.try_send(uuid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -212,59 +212,59 @@ impl GuestReconciler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
|
pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
|
||||||
let _runtime_reconcile_permit = self.reconcile_lock.read().await;
|
let _runtime_reconcile_permit = self.zone_reconcile_lock.read().await;
|
||||||
let Some(mut guest) = self.guests.read(uuid).await? else {
|
let Some(mut zone) = self.zones.read(uuid).await? else {
|
||||||
warn!(
|
warn!(
|
||||||
"notified of reconcile for guest {} but it didn't exist",
|
"notified of reconcile for zone {} but it didn't exist",
|
||||||
uuid
|
uuid
|
||||||
);
|
);
|
||||||
return Ok(false);
|
return Ok(false);
|
||||||
};
|
};
|
||||||
|
|
||||||
info!("reconciling guest {}", uuid);
|
info!("reconciling zone {}", uuid);
|
||||||
|
|
||||||
self.events
|
self.events
|
||||||
.send(DaemonEvent::GuestChanged(GuestChangedEvent {
|
.send(DaemonEvent::ZoneChanged(ZoneChangedEvent {
|
||||||
guest: Some(guest.clone()),
|
zone: Some(zone.clone()),
|
||||||
}))?;
|
}))?;
|
||||||
|
|
||||||
let start_status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
let start_status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||||
let result = match start_status {
|
let result = match start_status {
|
||||||
GuestStatus::Starting => self.start(uuid, &mut guest).await,
|
ZoneStatus::Starting => self.start(uuid, &mut zone).await,
|
||||||
GuestStatus::Exited => self.exited(&mut guest).await,
|
ZoneStatus::Exited => self.exited(&mut zone).await,
|
||||||
GuestStatus::Destroying => self.destroy(uuid, &mut guest).await,
|
ZoneStatus::Destroying => self.destroy(uuid, &mut zone).await,
|
||||||
_ => Ok(GuestReconcilerResult::Unchanged),
|
_ => Ok(ZoneReconcilerResult::Unchanged),
|
||||||
};
|
};
|
||||||
|
|
||||||
let result = match result {
|
let result = match result {
|
||||||
Ok(result) => result,
|
Ok(result) => result,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
|
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
|
||||||
guest.state.as_mut().unwrap().status = GuestStatus::Failed.into();
|
zone.state.as_mut().unwrap().status = ZoneStatus::Failed.into();
|
||||||
guest.state.as_mut().unwrap().error_info = Some(GuestErrorInfo {
|
zone.state.as_mut().unwrap().error_info = Some(ZoneErrorInfo {
|
||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
});
|
});
|
||||||
warn!("failed to start guest {}: {}", guest.id, error);
|
warn!("failed to start zone {}: {}", zone.id, error);
|
||||||
GuestReconcilerResult::Changed { rerun: false }
|
ZoneReconcilerResult::Changed { rerun: false }
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
info!("reconciled guest {}", uuid);
|
info!("reconciled zone {}", uuid);
|
||||||
|
|
||||||
let status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
let status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||||
let destroyed = status == GuestStatus::Destroyed;
|
let destroyed = status == ZoneStatus::Destroyed;
|
||||||
|
|
||||||
let rerun = if let GuestReconcilerResult::Changed { rerun } = result {
|
let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
|
||||||
let event = DaemonEvent::GuestChanged(GuestChangedEvent {
|
let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
|
||||||
guest: Some(guest.clone()),
|
zone: Some(zone.clone()),
|
||||||
});
|
});
|
||||||
|
|
||||||
if destroyed {
|
if destroyed {
|
||||||
self.guests.remove(uuid).await?;
|
self.zones.remove(uuid).await?;
|
||||||
let mut map = self.tasks.lock().await;
|
let mut map = self.tasks.lock().await;
|
||||||
map.remove(&uuid);
|
map.remove(&uuid);
|
||||||
} else {
|
} else {
|
||||||
self.guests.update(uuid, guest.clone()).await?;
|
self.zones.update(uuid, zone.clone()).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.events.send(event)?;
|
self.events.send(event)?;
|
||||||
@ -276,50 +276,50 @@ impl GuestReconciler {
|
|||||||
Ok(rerun)
|
Ok(rerun)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
let starter = GuestStarter {
|
let starter = ZoneStarter {
|
||||||
devices: &self.devices,
|
devices: &self.devices,
|
||||||
kernel_path: &self.kernel_path,
|
kernel_path: &self.kernel_path,
|
||||||
initrd_path: &self.initrd_path,
|
initrd_path: &self.initrd_path,
|
||||||
addons_path: &self.addons_path,
|
addons_path: &self.addons_path,
|
||||||
packer: &self.packer,
|
packer: &self.packer,
|
||||||
glt: &self.glt,
|
glt: &self.zlt,
|
||||||
runtime: &self.runtime,
|
runtime: &self.runtime,
|
||||||
};
|
};
|
||||||
starter.start(uuid, guest).await
|
starter.start(uuid, zone).await
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
if let Some(ref mut state) = guest.state {
|
if let Some(ref mut state) = zone.state {
|
||||||
state.set_status(GuestStatus::Destroying);
|
state.set_status(ZoneStatus::Destroying);
|
||||||
Ok(GuestReconcilerResult::Changed { rerun: true })
|
Ok(ZoneReconcilerResult::Changed { rerun: true })
|
||||||
} else {
|
} else {
|
||||||
Ok(GuestReconcilerResult::Unchanged)
|
Ok(ZoneReconcilerResult::Unchanged)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn destroy(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
async fn destroy(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
if let Err(error) = self.runtime.destroy(uuid).await {
|
if let Err(error) = self.runtime.destroy(uuid).await {
|
||||||
trace!("failed to destroy runtime guest {}: {}", uuid, error);
|
trace!("failed to destroy runtime zone {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
|
|
||||||
let domid = guest.state.as_ref().map(|x| x.domid);
|
let domid = zone.state.as_ref().map(|x| x.domid);
|
||||||
|
|
||||||
if let Some(domid) = domid {
|
if let Some(domid) = domid {
|
||||||
self.glt.remove(uuid, domid).await;
|
self.zlt.remove(uuid, domid).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("destroyed guest {}", uuid);
|
info!("destroyed zone {}", uuid);
|
||||||
guest.state = Some(GuestState {
|
zone.state = Some(ZoneState {
|
||||||
status: GuestStatus::Destroyed.into(),
|
status: ZoneStatus::Destroyed.into(),
|
||||||
network: None,
|
network: None,
|
||||||
exit_info: None,
|
exit_info: None,
|
||||||
error_info: None,
|
error_info: None,
|
||||||
host: self.glt.host_uuid().to_string(),
|
host: self.zlt.host_uuid().to_string(),
|
||||||
domid: domid.unwrap_or(u32::MAX),
|
domid: domid.unwrap_or(u32::MAX),
|
||||||
});
|
});
|
||||||
self.devices.release_all(uuid).await?;
|
self.devices.release_all(uuid).await?;
|
||||||
Ok(GuestReconcilerResult::Changed { rerun: false })
|
Ok(ZoneReconcilerResult::Changed { rerun: false })
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
|
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
|
||||||
@ -333,7 +333,7 @@ impl GuestReconciler {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn launch_task(&self, uuid: Uuid) -> Result<GuestReconcilerEntry> {
|
async fn launch_task(&self, uuid: Uuid) -> Result<ZoneReconcilerEntry> {
|
||||||
let this = self.clone();
|
let this = self.clone();
|
||||||
let (sender, mut receiver) = channel(10);
|
let (sender, mut receiver) = channel(10);
|
||||||
let task = tokio::task::spawn(async move {
|
let task = tokio::task::spawn(async move {
|
||||||
@ -346,7 +346,7 @@ impl GuestReconciler {
|
|||||||
let rerun = match this.reconcile(uuid).await {
|
let rerun = match this.reconcile(uuid).await {
|
||||||
Ok(rerun) => rerun,
|
Ok(rerun) => rerun,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
error!("failed to reconcile guest {}: {}", uuid, error);
|
error!("failed to reconcile zone {}: {}", uuid, error);
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -358,15 +358,15 @@ impl GuestReconciler {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
Ok(GuestReconcilerEntry { task, sender })
|
Ok(ZoneReconcilerEntry { task, sender })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
|
pub fn zoneinfo_to_networkstate(info: &ZoneInfo) -> ZoneNetworkState {
|
||||||
GuestNetworkState {
|
ZoneNetworkState {
|
||||||
guest_ipv4: info.guest_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
zone_ipv4: info.zone_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
guest_ipv6: info.guest_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
zone_ipv6: info.zone_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
guest_mac: info.guest_mac.as_ref().cloned().unwrap_or_default(),
|
zone_mac: info.zone_mac.as_ref().cloned().unwrap_or_default(),
|
||||||
gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),
|
gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),
|
@ -6,40 +6,40 @@ use std::sync::atomic::{AtomicBool, Ordering};
|
|||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use krata::launchcfg::LaunchPackedFormat;
|
use krata::launchcfg::LaunchPackedFormat;
|
||||||
use krata::v1::common::GuestOciImageSpec;
|
use krata::v1::common::ZoneOciImageSpec;
|
||||||
use krata::v1::common::{guest_image_spec::Image, Guest, GuestState, GuestStatus, OciImageFormat};
|
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
|
||||||
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
||||||
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
|
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
|
||||||
use kratart::{launch::GuestLaunchRequest, Runtime};
|
use kratart::{launch::ZoneLaunchRequest, Runtime};
|
||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
|
use crate::config::DaemonPciDeviceRdmReservePolicy;
|
||||||
|
use crate::devices::DaemonDeviceManager;
|
||||||
|
use crate::{
|
||||||
|
reconcile::zone::{zoneinfo_to_networkstate, ZoneReconcilerResult},
|
||||||
|
zlt::ZoneLookupTable,
|
||||||
|
};
|
||||||
|
use krata::v1::common::zone_image_spec::Image;
|
||||||
use tokio::fs::{self, File};
|
use tokio::fs::{self, File};
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio_tar::Archive;
|
use tokio_tar::Archive;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::config::DaemonPciDeviceRdmReservePolicy;
|
pub struct ZoneStarter<'a> {
|
||||||
use crate::devices::DaemonDeviceManager;
|
|
||||||
use crate::{
|
|
||||||
glt::GuestLookupTable,
|
|
||||||
reconcile::guest::{guestinfo_to_networkstate, GuestReconcilerResult},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub struct GuestStarter<'a> {
|
|
||||||
pub devices: &'a DaemonDeviceManager,
|
pub devices: &'a DaemonDeviceManager,
|
||||||
pub kernel_path: &'a Path,
|
pub kernel_path: &'a Path,
|
||||||
pub initrd_path: &'a Path,
|
pub initrd_path: &'a Path,
|
||||||
pub addons_path: &'a Path,
|
pub addons_path: &'a Path,
|
||||||
pub packer: &'a OciPackerService,
|
pub packer: &'a OciPackerService,
|
||||||
pub glt: &'a GuestLookupTable,
|
pub glt: &'a ZoneLookupTable,
|
||||||
pub runtime: &'a Runtime,
|
pub runtime: &'a Runtime,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestStarter<'_> {
|
impl ZoneStarter<'_> {
|
||||||
pub async fn oci_spec_tar_read_file(
|
pub async fn oci_spec_tar_read_file(
|
||||||
&self,
|
&self,
|
||||||
file: &Path,
|
file: &Path,
|
||||||
oci: &GuestOciImageSpec,
|
oci: &ZoneOciImageSpec,
|
||||||
) -> Result<Vec<u8>> {
|
) -> Result<Vec<u8>> {
|
||||||
if oci.format() != OciImageFormat::Tar {
|
if oci.format() != OciImageFormat::Tar {
|
||||||
return Err(anyhow!(
|
return Err(anyhow!(
|
||||||
@ -75,9 +75,9 @@ impl GuestStarter<'_> {
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
pub async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
let Some(ref spec) = guest.spec else {
|
let Some(ref spec) = zone.spec else {
|
||||||
return Err(anyhow!("guest spec not specified"));
|
return Err(anyhow!("zone spec not specified"));
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(ref image) = spec.image else {
|
let Some(ref image) = spec.image else {
|
||||||
@ -100,7 +100,7 @@ impl GuestStarter<'_> {
|
|||||||
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
|
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
|
||||||
OciImageFormat::Erofs => OciPackedFormat::Erofs,
|
OciImageFormat::Erofs => OciPackedFormat::Erofs,
|
||||||
OciImageFormat::Tar => {
|
OciImageFormat::Tar => {
|
||||||
return Err(anyhow!("tar image format is not supported for guests"));
|
return Err(anyhow!("tar image format is not supported for zones"));
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
@ -176,7 +176,7 @@ impl GuestStarter<'_> {
|
|||||||
|
|
||||||
let info = self
|
let info = self
|
||||||
.runtime
|
.runtime
|
||||||
.launch(GuestLaunchRequest {
|
.launch(ZoneLaunchRequest {
|
||||||
format: LaunchPackedFormat::Squashfs,
|
format: LaunchPackedFormat::Squashfs,
|
||||||
uuid: Some(uuid),
|
uuid: Some(uuid),
|
||||||
name: if spec.name.is_empty() {
|
name: if spec.name.is_empty() {
|
||||||
@ -201,17 +201,17 @@ impl GuestStarter<'_> {
|
|||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
self.glt.associate(uuid, info.domid).await;
|
self.glt.associate(uuid, info.domid).await;
|
||||||
info!("started guest {}", uuid);
|
info!("started zone {}", uuid);
|
||||||
guest.state = Some(GuestState {
|
zone.state = Some(ZoneState {
|
||||||
status: GuestStatus::Started.into(),
|
status: ZoneStatus::Started.into(),
|
||||||
network: Some(guestinfo_to_networkstate(&info)),
|
network: Some(zoneinfo_to_networkstate(&info)),
|
||||||
exit_info: None,
|
exit_info: None,
|
||||||
error_info: None,
|
error_info: None,
|
||||||
host: self.glt.host_uuid().to_string(),
|
host: self.glt.host_uuid().to_string(),
|
||||||
domid: info.domid,
|
domid: info.domid,
|
||||||
});
|
});
|
||||||
success.store(true, Ordering::Release);
|
success.store(true, Ordering::Release);
|
||||||
Ok(GuestReconcilerResult::Changed { rerun: false })
|
Ok(ZoneReconcilerResult::Changed { rerun: false })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -3,18 +3,18 @@ use std::{collections::HashMap, sync::Arc};
|
|||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
struct GuestLookupTableState {
|
struct ZoneLookupTableState {
|
||||||
domid_to_uuid: HashMap<u32, Uuid>,
|
domid_to_uuid: HashMap<u32, Uuid>,
|
||||||
uuid_to_domid: HashMap<Uuid, u32>,
|
uuid_to_domid: HashMap<Uuid, u32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestLookupTableState {
|
impl ZoneLookupTableState {
|
||||||
pub fn new(host_uuid: Uuid) -> Self {
|
pub fn new(host_uuid: Uuid) -> Self {
|
||||||
let mut domid_to_uuid = HashMap::new();
|
let mut domid_to_uuid = HashMap::new();
|
||||||
let mut uuid_to_domid = HashMap::new();
|
let mut uuid_to_domid = HashMap::new();
|
||||||
domid_to_uuid.insert(0, host_uuid);
|
domid_to_uuid.insert(0, host_uuid);
|
||||||
uuid_to_domid.insert(host_uuid, 0);
|
uuid_to_domid.insert(host_uuid, 0);
|
||||||
GuestLookupTableState {
|
ZoneLookupTableState {
|
||||||
domid_to_uuid,
|
domid_to_uuid,
|
||||||
uuid_to_domid,
|
uuid_to_domid,
|
||||||
}
|
}
|
||||||
@ -22,18 +22,18 @@ impl GuestLookupTableState {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct GuestLookupTable {
|
pub struct ZoneLookupTable {
|
||||||
host_domid: u32,
|
host_domid: u32,
|
||||||
host_uuid: Uuid,
|
host_uuid: Uuid,
|
||||||
state: Arc<RwLock<GuestLookupTableState>>,
|
state: Arc<RwLock<ZoneLookupTableState>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestLookupTable {
|
impl ZoneLookupTable {
|
||||||
pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
|
pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
|
||||||
GuestLookupTable {
|
ZoneLookupTable {
|
||||||
host_domid,
|
host_domid,
|
||||||
host_uuid,
|
host_uuid,
|
||||||
state: Arc::new(RwLock::new(GuestLookupTableState::new(host_uuid))),
|
state: Arc::new(RwLock::new(ZoneLookupTableState::new(host_uuid))),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -1,30 +0,0 @@
|
|||||||
use anyhow::{anyhow, Result};
|
|
||||||
use env_logger::Env;
|
|
||||||
use krataguest::{death, init::GuestInit};
|
|
||||||
use log::error;
|
|
||||||
use std::env;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() -> Result<()> {
|
|
||||||
env::set_var("RUST_BACKTRACE", "1");
|
|
||||||
env_logger::Builder::from_env(Env::default().default_filter_or("warn")).init();
|
|
||||||
if env::var("KRATA_UNSAFE_ALWAYS_ALLOW_INIT").unwrap_or("0".to_string()) != "1" {
|
|
||||||
let pid = std::process::id();
|
|
||||||
if pid > 3 {
|
|
||||||
return Err(anyhow!(
|
|
||||||
"not running because the pid of {} indicates this is probably not \
|
|
||||||
the right context for the init daemon. \
|
|
||||||
run with KRATA_UNSAFE_ALWAYS_ALLOW_INIT=1 to bypass this check",
|
|
||||||
pid
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let mut guest = GuestInit::new();
|
|
||||||
if let Err(error) = guest.init().await {
|
|
||||||
error!("failed to initialize guest: {}", error);
|
|
||||||
death(127).await?;
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
death(1).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -15,6 +15,7 @@ bytes = { workspace = true }
|
|||||||
libc = { workspace = true }
|
libc = { workspace = true }
|
||||||
log = { workspace = true }
|
log = { workspace = true }
|
||||||
once_cell = { workspace = true }
|
once_cell = { workspace = true }
|
||||||
|
pin-project-lite = { workspace = true }
|
||||||
prost = { workspace = true }
|
prost = { workspace = true }
|
||||||
prost-reflect = { workspace = true }
|
prost-reflect = { workspace = true }
|
||||||
prost-types = { workspace = true }
|
prost-types = { workspace = true }
|
||||||
@ -27,6 +28,8 @@ tower = { workspace = true }
|
|||||||
url = { workspace = true }
|
url = { workspace = true }
|
||||||
|
|
||||||
[target.'cfg(unix)'.dependencies]
|
[target.'cfg(unix)'.dependencies]
|
||||||
|
hyper = { workspace = true }
|
||||||
|
hyper-util = { workspace = true }
|
||||||
nix = { workspace = true, features = ["term"] }
|
nix = { workspace = true, features = ["term"] }
 [build-dependencies]

@@ -8,29 +8,29 @@ option java_outer_classname = "CommonProto";
-message Guest {
+message Zone {
-  GuestSpec spec = 2;
+  ZoneSpec spec = 2;
-  GuestState state = 3;
+  ZoneState state = 3;
-message GuestSpec {
+message ZoneSpec {
-  GuestImageSpec image = 2;
+  ZoneImageSpec image = 2;
-  GuestImageSpec kernel = 3;
+  ZoneImageSpec kernel = 3;
-  GuestImageSpec initrd = 4;
+  ZoneImageSpec initrd = 4;
-  GuestTaskSpec task = 7;
+  ZoneTaskSpec task = 7;
-  repeated GuestSpecAnnotation annotations = 8;
+  repeated ZoneSpecAnnotation annotations = 8;
-  repeated GuestSpecDevice devices = 9;
+  repeated ZoneSpecDevice devices = 9;
-message GuestImageSpec {
+message ZoneImageSpec {
-    GuestOciImageSpec oci = 1;
+    ZoneOciImageSpec oci = 1;

@@ -42,77 +42,77 @@ enum OciImageFormat {
-message GuestOciImageSpec {
+message ZoneOciImageSpec {
-message GuestTaskSpec {
+message ZoneTaskSpec {
-  repeated GuestTaskSpecEnvVar environment = 1;
+  repeated ZoneTaskSpecEnvVar environment = 1;
-message GuestTaskSpecEnvVar {
+message ZoneTaskSpecEnvVar {
-message GuestSpecAnnotation {
+message ZoneSpecAnnotation {
-message GuestSpecDevice {
+message ZoneSpecDevice {
-message GuestState {
+message ZoneState {
-  GuestStatus status = 1;
+  ZoneStatus status = 1;
-  GuestNetworkState network = 2;
+  ZoneNetworkState network = 2;
-  GuestExitInfo exit_info = 3;
+  ZoneExitInfo exit_info = 3;
-  GuestErrorInfo error_info = 4;
+  ZoneErrorInfo error_info = 4;
-enum GuestStatus {
+enum ZoneStatus {
-  GUEST_STATUS_UNKNOWN = 0;
+  ZONE_STATUS_UNKNOWN = 0;
-  GUEST_STATUS_STARTING = 1;
+  ZONE_STATUS_STARTING = 1;
-  GUEST_STATUS_STARTED = 2;
+  ZONE_STATUS_STARTED = 2;
-  GUEST_STATUS_EXITED = 3;
+  ZONE_STATUS_EXITED = 3;
-  GUEST_STATUS_DESTROYING = 4;
+  ZONE_STATUS_DESTROYING = 4;
-  GUEST_STATUS_DESTROYED = 5;
+  ZONE_STATUS_DESTROYED = 5;
-  GUEST_STATUS_FAILED = 6;
+  ZONE_STATUS_FAILED = 6;
-message GuestNetworkState {
+message ZoneNetworkState {
-  string guest_ipv4 = 1;
+  string zone_ipv4 = 1;
-  string guest_ipv6 = 2;
+  string zone_ipv6 = 2;
-  string guest_mac = 3;
+  string zone_mac = 3;
-message GuestExitInfo {
+message ZoneExitInfo {
-message GuestErrorInfo {
+message ZoneErrorInfo {
-message GuestMetricNode {
+message ZoneMetricNode {
-  GuestMetricFormat format = 3;
+  ZoneMetricFormat format = 3;
-  repeated GuestMetricNode children = 4;
+  repeated ZoneMetricNode children = 4;
-enum GuestMetricFormat {
+enum ZoneMetricFormat {
-  GUEST_METRIC_FORMAT_UNKNOWN = 0;
+  ZONE_METRIC_FORMAT_UNKNOWN = 0;
-  GUEST_METRIC_FORMAT_BYTES = 1;
+  ZONE_METRIC_FORMAT_BYTES = 1;
-  GUEST_METRIC_FORMAT_INTEGER = 2;
+  ZONE_METRIC_FORMAT_INTEGER = 2;
-  GUEST_METRIC_FORMAT_DURATION_SECONDS = 3;
+  ZONE_METRIC_FORMAT_DURATION_SECONDS = 3;
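Since the renames above are mechanical, a short hedged sketch of what the prost-generated Rust side looks like after this change; the krata::v1::common module path follows imports visible later in this diff, while the helper and the exact fields filled in are illustrative only.

use krata::v1::common::{
    zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneOciImageSpec, ZoneSpec,
};

// Hypothetical helper: build a minimal ZoneSpec pointing at an OCI image digest.
// Everything not set explicitly falls back to the prost-derived Default.
fn example_zone_spec(digest: &str) -> ZoneSpec {
    ZoneSpec {
        name: "example".to_string(),
        image: Some(ZoneImageSpec {
            image: Some(Image::Oci(ZoneOciImageSpec {
                digest: digest.to_string(),
                // OCI_IMAGE_FORMAT_TAR is the only format value shown in this hunk.
                format: OciImageFormat::Tar.into(),
            })),
        }),
        vcpus: 1,
        mem: 512,
        ..Default::default()
    }
}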
@@ -12,17 +12,17 @@ import "krata/v1/common.proto";
-  rpc CreateGuest(CreateGuestRequest) returns (CreateGuestReply);
+  rpc CreateZone(CreateZoneRequest) returns (CreateZoneReply);
-  rpc DestroyGuest(DestroyGuestRequest) returns (DestroyGuestReply);
+  rpc DestroyZone(DestroyZoneRequest) returns (DestroyZoneReply);
-  rpc ResolveGuest(ResolveGuestRequest) returns (ResolveGuestReply);
+  rpc ResolveZone(ResolveZoneRequest) returns (ResolveZoneReply);
-  rpc ListGuests(ListGuestsRequest) returns (ListGuestsReply);
+  rpc ListZones(ListZonesRequest) returns (ListZonesReply);
-  rpc ExecGuest(stream ExecGuestRequest) returns (stream ExecGuestReply);
+  rpc ExecZone(stream ExecZoneRequest) returns (stream ExecZoneReply);
-  rpc ConsoleData(stream ConsoleDataRequest) returns (stream ConsoleDataReply);
-  rpc ReadGuestMetrics(ReadGuestMetricsRequest) returns (ReadGuestMetricsReply);
+  rpc AttachZoneConsole(stream ZoneConsoleRequest) returns (stream ZoneConsoleReply);
+  rpc ReadZoneMetrics(ReadZoneMetricsRequest) returns (ReadZoneMetricsReply);
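For orientation, a hedged sketch of a caller against the renamed service using the tonic-generated ControlServiceClient; ControlClientProvider::dial, ControlDialAddress::from_str, and the socket path are assumptions drawn from imports visible elsewhere in this diff, not confirmed signatures.

use anyhow::Result;
use krata::client::ControlClientProvider;
use krata::dial::ControlDialAddress;
use krata::v1::control::ListZonesRequest;
use std::str::FromStr;

// Dial the daemon and print the id of every zone via the renamed ListZones RPC.
async fn print_zone_ids(socket: &str) -> Result<()> {
    // The address parsing and dial helper are assumed here for illustration.
    let address = ControlDialAddress::from_str(socket)?;
    let mut client = ControlClientProvider::dial(address).await?;
    let reply = client.list_zones(ListZonesRequest {}).await?.into_inner();
    for zone in reply.zones {
        println!("{}", zone.id);
    }
    Ok(())
}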
@@ -40,41 +40,41 @@ message IdentifyHostReply {
-message CreateGuestRequest {
+message CreateZoneRequest {
-  krata.v1.common.GuestSpec spec = 1;
+  krata.v1.common.ZoneSpec spec = 1;
-message CreateGuestReply {
+message CreateZoneReply {
-  string guest_id = 1;
+  string Zone_id = 1;
-message DestroyGuestRequest {
+message DestroyZoneRequest {
-  string guest_id = 1;
+  string Zone_id = 1;
-message DestroyGuestReply {}
+message DestroyZoneReply {}
-message ResolveGuestRequest {
+message ResolveZoneRequest {
-message ResolveGuestReply {
+message ResolveZoneReply {
-  krata.v1.common.Guest guest = 1;
+  krata.v1.common.Zone Zone = 1;
-message ListGuestsRequest {}
+message ListZonesRequest {}
-message ListGuestsReply {
+message ListZonesReply {
-  repeated krata.v1.common.Guest guests = 1;
+  repeated krata.v1.common.Zone Zones = 1;
-message ExecGuestRequest {
+message ExecZoneRequest {
-  string guest_id = 1;
+  string Zone_id = 1;
-  krata.v1.common.GuestTaskSpec task = 2;
+  krata.v1.common.ZoneTaskSpec task = 2;
-message ExecGuestReply {
+message ExecZoneReply {

@@ -82,12 +82,12 @@ message ExecGuestReply {
-message ConsoleDataRequest {
+message ZoneConsoleRequest {
-  string guest_id = 1;
+  string Zone_id = 1;
-message ConsoleDataReply {
+message ZoneConsoleReply {

@@ -95,20 +95,20 @@ message WatchEventsRequest {}
-    GuestChangedEvent guest_changed = 1;
+    ZoneChangedEvent Zone_changed = 1;
-message GuestChangedEvent {
+message ZoneChangedEvent {
-  krata.v1.common.Guest guest = 1;
+  krata.v1.common.Zone Zone = 1;
-message ReadGuestMetricsRequest {
+message ReadZoneMetricsRequest {
-  string guest_id = 1;
+  string Zone_id = 1;
-message ReadGuestMetricsReply {
+message ReadZoneMetricsReply {
-  krata.v1.common.GuestMetricNode root = 1;
+  krata.v1.common.ZoneMetricNode root = 1;

@@ -205,9 +205,9 @@ message ListDevicesReply {
-  CPU_CLASS_STANDARD = 0;
+  HOST_CPU_TOPOLOGY_CLASS_STANDARD = 0;
-  CPU_CLASS_PERFORMANCE = 1;
+  HOST_CPU_TOPOLOGY_CLASS_PERFORMANCE = 1;
-  CPU_CLASS_EFFICIENCY = 2;
+  HOST_CPU_TOPOLOGY_CLASS_EFFICIENCY = 2;

@@ -1,14 +1,10 @@
+#[cfg(unix)]
+use crate::unix::HyperUnixConnector;
-#[cfg(unix)]
-use tokio::net::UnixStream;
-#[cfg(unix)]
-use tonic::transport::Uri;
-#[cfg(unix)]
-use tower::service_fn;

@@ -52,10 +48,7 @@ impl ControlClientProvider {
-            .connect_with_connector(service_fn(|uri: Uri| {
-                let path = uri.path().to_string();
-                UnixStream::connect(path)
-            }))
+            .connect_with_connector(HyperUnixConnector {})

@@ -12,6 +12,9 @@ pub mod launchcfg;
+#[cfg(unix)]
+pub mod unix;
+
crates/krata/src/unix.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
+use std::future::Future;
+use std::io::Error;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use hyper::rt::ReadBufCursor;
+use hyper_util::rt::TokioIo;
+use pin_project_lite::pin_project;
+use tokio::io::AsyncWrite;
+use tokio::net::UnixStream;
+use tonic::transport::Uri;
+use tower::Service;
+
+pin_project! {
+    #[derive(Debug)]
+    pub struct HyperUnixStream {
+        #[pin]
+        pub stream: UnixStream,
+    }
+}
+
+impl hyper::rt::Read for HyperUnixStream {
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: ReadBufCursor<'_>,
+    ) -> Poll<Result<(), Error>> {
+        let mut tokio = TokioIo::new(self.project().stream);
+        Pin::new(&mut tokio).poll_read(cx, buf)
+    }
+}
+
+impl hyper::rt::Write for HyperUnixStream {
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<Result<usize, Error>> {
+        self.project().stream.poll_write(cx, buf)
+    }
+
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
+        self.project().stream.poll_flush(cx)
+    }
+
+    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
+        self.project().stream.poll_shutdown(cx)
+    }
+}
+
+pub struct HyperUnixConnector;
+
+impl Service<Uri> for HyperUnixConnector {
+    type Response = HyperUnixStream;
+    type Error = Error;
+    #[allow(clippy::type_complexity)]
+    type Future =
+        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
+
+    fn call(&mut self, req: Uri) -> Self::Future {
+        let fut = async move {
+            let path = req.path().to_string();
+            let stream = UnixStream::connect(path).await?;
+            Ok(HyperUnixStream { stream })
+        };
+
+        Box::pin(fut)
+    }
+
+    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
+}
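A short usage sketch of the connector above, mirroring the dial_unix_socket change earlier in this diff; it is illustrative (and unix-only, per the cfg gate on the module) rather than a documented public API.

use anyhow::Result;
use krata::unix::HyperUnixConnector;
use krata::v1::control::control_service_client::ControlServiceClient;
use tonic::transport::{Channel, Endpoint};

// Build a tonic channel over a unix socket. The authority in the URL is a
// placeholder; only the URI path reaches HyperUnixConnector::call.
async fn unix_channel(path: &str) -> Result<Channel> {
    Ok(Endpoint::try_from(format!("unix://localhost/{}", path))?
        .connect_with_connector(HyperUnixConnector {})
        .await?)
}

// Wrap the channel in the generated control client.
async fn unix_client(path: &str) -> Result<ControlServiceClient<Channel>> {
    Ok(ControlServiceClient::new(unix_channel(path).await?))
}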
@@ -16,7 +16,7 @@ clap = { workspace = true }
-krata = { path = "../krata", version = "^0.0.12" }
+krata = { path = "../krata", version = "^0.0.13" }

@@ -2,10 +2,10 @@ use anyhow::Result;
-        common::Guest,
+        common::Zone,
-            ListGuestsRequest,
+            ListZonesRequest,

@@ -33,7 +33,7 @@ pub struct NetworkSide {
-    pub guest: NetworkSide,
+    pub zone: NetworkSide,

@@ -60,23 +60,23 @@ impl AutoNetworkWatcher {
-        let mut all_guests: HashMap<Uuid, Guest> = HashMap::new();
+        let mut all_zones: HashMap<Uuid, Zone> = HashMap::new();
-        for guest in self
+        for zone in self
-            .list_guests(ListGuestsRequest {})
+            .list_zones(ListZonesRequest {})
-            .guests
+            .zones
-            let Ok(uuid) = Uuid::from_str(&guest.id) else {
+            let Ok(uuid) = Uuid::from_str(&zone.id) else {
-            all_guests.insert(uuid, guest);
+            all_zones.insert(uuid, zone);
-        for (uuid, guest) in &all_guests {
+        for (uuid, zone) in &all_zones {
-            let Some(ref state) = guest.state else {
+            let Some(ref state) = zone.state else {

@@ -88,15 +88,15 @@ impl AutoNetworkWatcher {
-            let Ok(guest_ipv4_cidr) = Ipv4Cidr::from_str(&network.guest_ipv4) else {
+            let Ok(zone_ipv4_cidr) = Ipv4Cidr::from_str(&network.zone_ipv4) else {
-            let Ok(guest_ipv6_cidr) = Ipv6Cidr::from_str(&network.guest_ipv6) else {
+            let Ok(zone_ipv6_cidr) = Ipv6Cidr::from_str(&network.zone_ipv6) else {
-            let Ok(guest_mac) = EthernetAddress::from_str(&network.guest_mac) else {
+            let Ok(zone_mac) = EthernetAddress::from_str(&network.zone_mac) else {

@@ -115,10 +115,10 @@ impl AutoNetworkWatcher {
-                guest: NetworkSide {
+                zone: NetworkSide {
-                    ipv4: guest_ipv4_cidr,
+                    ipv4: zone_ipv4_cidr,
-                    ipv6: guest_ipv6_cidr,
+                    ipv6: zone_ipv6_cidr,
-                    mac: guest_mac,
+                    mac: zone_mac,

@@ -175,7 +175,7 @@ impl AutoNetworkWatcher {
-                    Ok(Event::GuestChanged(_)) => {
+                    Ok(Event::ZoneChanged(_)) => {

@@ -54,11 +54,11 @@ impl NetworkStack<'_> {
-                    trace!("failed to send guest packet to bridge: {}", error);
+                    trace!("failed to send zone packet to bridge: {}", error);
-                    trace!("failed to send guest packet to nat: {}", error);
+                    trace!("failed to send zone packet to nat: {}", error);

@@ -137,7 +137,7 @@ impl NetworkBackend {
-        let handle = self.bridge.join(self.metadata.guest.mac).await?;
+        let handle = self.bridge.join(self.metadata.zone.mac).await?;

@@ -153,12 +153,12 @@ impl NetworkBackend {
-                "launched network backend for krata guest {}",
+                "launched network backend for krata zone {}",
-                    "network backend for krata guest {} failed: {}",
+                    "network backend for krata zone {} failed: {}",

@@ -169,7 +169,7 @@ impl NetworkBackend {
-            "destroyed network backend for krata guest {}",
+            "destroyed network backend for krata zone {}",

@@ -7,7 +7,7 @@ use hbridge::HostBridge;
-    v1::{common::Guest, control::control_service_client::ControlServiceClient},
+    v1::{common::Zone, control::control_service_client::ControlServiceClient},

@@ -33,7 +33,7 @@ pub const EXTRA_MTU: usize = 20;
-    pub guests: HashMap<Uuid, Guest>,
+    pub zones: HashMap<Uuid, Zone>,

@@ -47,7 +47,7 @@ impl NetworkService {
-            guests: HashMap::new(),
+            zones: HashMap::new(),

@@ -99,7 +99,7 @@ impl NetworkService {
-                        "failed to launch network backend for krata guest {}: {}",
+                        "failed to launch network backend for krata zone {}: {}",

@@ -1,6 +1,6 @@
-description = "Runtime for running guests on the krata isolation engine"
+description = "Runtime for managing zones on the krata isolation engine"

@@ -12,20 +12,20 @@ resolver = "2"
-krata = { path = "../krata", version = "^0.0.12" }
+krata = { path = "../krata", version = "^0.0.13" }
-krata-oci = { path = "../oci", version = "^0.0.12" }
+krata-oci = { path = "../oci", version = "^0.0.13" }
-krata-loopdev = { path = "../loopdev", version = "^0.0.12" }
+krata-loopdev = { path = "../loopdev", version = "^0.0.13" }
-krata-xencall = { path = "../xen/xencall", version = "^0.0.12" }
+krata-xencall = { path = "../xen/xencall", version = "^0.0.13" }
-krata-xenclient = { path = "../xen/xenclient", version = "^0.0.12" }
+krata-xenclient = { path = "../xen/xenclient", version = "^0.0.13" }
-krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.12" }
+krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.13" }
-krata-xengnt = { path = "../xen/xengnt", version = "^0.0.12" }
+krata-xengnt = { path = "../xen/xengnt", version = "^0.0.13" }
-krata-xenplatform = { path = "../xen/xenplatform", version = "^0.0.12" }
+krata-xenplatform = { path = "../xen/xenplatform", version = "^0.0.13" }
-krata-xenstore = { path = "../xen/xenstore", version = "^0.0.12" }
+krata-xenstore = { path = "../xen/xenstore", version = "^0.0.13" }

@@ -99,23 +99,23 @@ impl IpVendor {
-                .read_string(format!("{}/krata/network/guest/ipv4", dom_path))
+                .read_string(format!("{}/krata/network/zone/ipv4", dom_path))
-                .read_string(format!("{}/krata/network/guest/ipv6", dom_path))
+                .read_string(format!("{}/krata/network/zone/ipv6", dom_path))
-                    error!("ipv4 conflict detected: guest {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv4.ip(), uuid, uuid);
+                    error!("ipv4 conflict detected: zone {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv4.ip(), uuid, uuid);
-                    error!("ipv6 conflict detected: guest {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv6.ip(), uuid, uuid);
+                    error!("ipv6 conflict detected: zone {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv6.ip(), uuid, uuid);

@@ -251,13 +251,13 @@ impl IpVendor {
-                error!("ipv4 conflict detected: guest {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv4, uuid, uuid);
+                error!("ipv4 conflict detected: zone {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv4, uuid, uuid);
-                error!("ipv6 conflict detected: guest {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv6, uuid, uuid);
+                error!("ipv6 conflict detected: zone {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv6, uuid, uuid);

@@ -271,16 +271,16 @@ impl IpVendor {
-        let Some(guest_ipv4) = self
+        let Some(zone_ipv4) = self
-            .read_string(format!("{}/krata/network/guest/ipv4", dom_path))
+            .read_string(format!("{}/krata/network/zone/ipv4", dom_path))
-        let Some(guest_ipv6) = self
+        let Some(zone_ipv6) = self
-            .read_string(format!("{}/krata/network/guest/ipv6", dom_path))
+            .read_string(format!("{}/krata/network/zone/ipv6", dom_path))

@@ -300,10 +300,10 @@ impl IpVendor {
-        let Some(guest_ipv4) = Ipv4Network::from_str(&guest_ipv4).ok() else {
+        let Some(zone_ipv4) = Ipv4Network::from_str(&zone_ipv4).ok() else {
-        let Some(guest_ipv6) = Ipv6Network::from_str(&guest_ipv6).ok() else {
+        let Some(zone_ipv6) = Ipv6Network::from_str(&zone_ipv6).ok() else {

@@ -315,10 +315,10 @@ impl IpVendor {
-            ipv4: guest_ipv4.ip(),
+            ipv4: zone_ipv4.ip(),
-            ipv4_prefix: guest_ipv4.prefix(),
+            ipv4_prefix: zone_ipv4.prefix(),
-            ipv6: guest_ipv6.ip(),
+            ipv6: zone_ipv6.ip(),
-            ipv6_prefix: guest_ipv6.prefix(),
+            ipv6_prefix: zone_ipv6.prefix(),

@@ -20,13 +20,13 @@ use xenplatform::domain::BaseDomainConfig;
-use super::{GuestInfo, GuestState};
+use super::{ZoneInfo, ZoneState};
-pub struct GuestLaunchRequest {
+pub struct ZoneLaunchRequest {

@@ -42,11 +42,11 @@ pub struct GuestLaunchRequest {
-pub struct GuestLauncher {
+pub struct ZoneLauncher {
-impl GuestLauncher {
+impl ZoneLauncher {

@@ -54,16 +54,16 @@ impl GuestLauncher {
-        request: GuestLaunchRequest,
+        request: ZoneLaunchRequest,
-    ) -> Result<GuestInfo> {
+    ) -> Result<ZoneInfo> {
-        let mut container_mac = MacAddr6::random();
+        let mut zone_mac = MacAddr6::random();
-        container_mac.set_local(true);
+        zone_mac.set_local(true);
-        container_mac.set_multicast(false);
+        zone_mac.set_multicast(false);

@@ -145,7 +145,7 @@ impl GuestLauncher {
-        let guest_mac_string = container_mac.to_string().replace('-', ":");
+        let zone_mac_string = zone_mac.to_string().replace('-', ":");

@@ -191,16 +191,16 @@ impl GuestLauncher {
-                "krata/network/guest/ipv4".to_string(),
+                "krata/network/zone/ipv4".to_string(),
-                "krata/network/guest/ipv6".to_string(),
+                "krata/network/zone/ipv6".to_string(),
-                "krata/network/guest/mac".to_string(),
+                "krata/network/zone/mac".to_string(),
-                guest_mac_string.clone(),
+                zone_mac_string.clone(),

@@ -240,7 +240,7 @@ impl GuestLauncher {
-                mac: guest_mac_string.clone(),
+                mac: zone_mac_string.clone(),

@@ -248,20 +248,20 @@ impl GuestLauncher {
-            extra_rw_paths: vec!["krata/guest".to_string()],
+            extra_rw_paths: vec!["krata/zone".to_string()],
-                Ok(GuestInfo {
+                Ok(ZoneInfo {
-                    guest_ipv4: Some(IpNetwork::new(IpAddr::V4(ip.ipv4), ip.ipv4_prefix)?),
+                    zone_ipv4: Some(IpNetwork::new(IpAddr::V4(ip.ipv4), ip.ipv4_prefix)?),
-                    guest_ipv6: Some(IpNetwork::new(IpAddr::V6(ip.ipv6), ip.ipv6_prefix)?),
+                    zone_ipv6: Some(IpNetwork::new(IpAddr::V6(ip.ipv6), ip.ipv6_prefix)?),
-                    guest_mac: Some(guest_mac_string.clone()),
+                    zone_mac: Some(zone_mac_string.clone()),

@@ -271,7 +271,7 @@ impl GuestLauncher {
-                    state: GuestState { exit_code: None },
+                    state: ZoneState { exit_code: None },

@@ -12,7 +12,7 @@ use xenstore::{XsdClient, XsdInterface};
-    launch::{GuestLaunchRequest, GuestLauncher},
+    launch::{ZoneLaunchRequest, ZoneLauncher},

@@ -29,29 +29,32 @@ type RuntimePlatform = xenplatform::x86pv::X86PvPlatform;
-pub struct GuestLoopInfo {
+#[derive(Clone)]
+pub struct ZoneLoopInfo {
-pub struct GuestState {
+#[derive(Clone)]
+pub struct ZoneState {
-pub struct GuestInfo {
+#[derive(Clone)]
+pub struct ZoneInfo {
-    pub loops: Vec<GuestLoopInfo>,
+    pub loops: Vec<ZoneLoopInfo>,
-    pub guest_ipv4: Option<IpNetwork>,
+    pub zone_ipv4: Option<IpNetwork>,
-    pub guest_ipv6: Option<IpNetwork>,
+    pub zone_ipv6: Option<IpNetwork>,
-    pub guest_mac: Option<String>,
+    pub zone_mac: Option<String>,
-    pub state: GuestState,
+    pub state: ZoneState,

@@ -75,8 +78,8 @@ impl RuntimeContext {
-    pub async fn list(&self) -> Result<Vec<GuestInfo>> {
+    pub async fn list(&self) -> Result<Vec<ZoneInfo>> {
-        let mut guests: Vec<GuestInfo> = Vec::new();
+        let mut zones: Vec<ZoneInfo> = Vec::new();

@@ -112,20 +115,20 @@ impl RuntimeContext {
-            let guest_ipv4 = self
+            let zone_ipv4 = self
-                .read_string(&format!("{}/krata/network/guest/ipv4", &dom_path))
+                .read_string(&format!("{}/krata/network/zone/ipv4", &dom_path))
-            let guest_ipv6 = self
+            let zone_ipv6 = self
-                .read_string(&format!("{}/krata/network/guest/ipv6", &dom_path))
+                .read_string(&format!("{}/krata/network/zone/ipv6", &dom_path))
-            let guest_mac = self
+            let zone_mac = self
-                .read_string(&format!("{}/krata/network/guest/mac", &dom_path))
+                .read_string(&format!("{}/krata/network/zone/mac", &dom_path))

@@ -143,14 +146,14 @@ impl RuntimeContext {
-            let guest_ipv4 = if let Some(guest_ipv4) = guest_ipv4 {
+            let zone_ipv4 = if let Some(zone_ipv4) = zone_ipv4 {
-                IpNetwork::from_str(&guest_ipv4).ok()
+                IpNetwork::from_str(&zone_ipv4).ok()
-            let guest_ipv6 = if let Some(guest_ipv6) = guest_ipv6 {
+            let zone_ipv6 = if let Some(zone_ipv6) = zone_ipv6 {
-                IpNetwork::from_str(&guest_ipv6).ok()
+                IpNetwork::from_str(&zone_ipv6).ok()

@@ -170,7 +173,7 @@ impl RuntimeContext {
-                .read_string(&format!("{}/krata/guest/exit-code", &dom_path))
+                .read_string(&format!("{}/krata/zone/exit-code", &dom_path))

@@ -178,37 +181,37 @@ impl RuntimeContext {
-            let state = GuestState { exit_code };
+            let state = ZoneState { exit_code };
-            guests.push(GuestInfo {
+            zones.push(ZoneInfo {
-                guest_ipv4,
+                zone_ipv4,
-                guest_ipv6,
+                zone_ipv6,
-                guest_mac,
+                zone_mac,
-        Ok(guests)
+        Ok(zones)
-    pub async fn resolve(&self, uuid: Uuid) -> Result<Option<GuestInfo>> {
+    pub async fn resolve(&self, uuid: Uuid) -> Result<Option<ZoneInfo>> {
-        for guest in self.list().await? {
+        for zone in self.list().await? {
-            if guest.uuid == uuid {
+            if zone.uuid == uuid {
-                return Ok(Some(guest));
+                return Ok(Some(zone));
-    fn parse_loop_set(input: &Option<String>) -> Vec<GuestLoopInfo> {
+    fn parse_loop_set(input: &Option<String>) -> Vec<ZoneLoopInfo> {

@@ -219,7 +222,7 @@ impl RuntimeContext {
-            .map(|(device, file, delete)| GuestLoopInfo {
+            .map(|(device, file, delete)| ZoneLoopInfo {

@@ -228,7 +231,7 @@ impl RuntimeContext {
-            .collect::<Vec<GuestLoopInfo>>()
+            .collect::<Vec<ZoneLoopInfo>>()

@@ -249,8 +252,8 @@ impl Runtime {
-    pub async fn launch(&self, request: GuestLaunchRequest) -> Result<GuestInfo> {
+    pub async fn launch(&self, request: ZoneLaunchRequest) -> Result<ZoneInfo> {
-        let mut launcher = GuestLauncher::new(self.launch_semaphore.clone())?;
+        let mut launcher = ZoneLauncher::new(self.launch_semaphore.clone())?;

@@ -259,7 +262,7 @@ impl Runtime {
-            .ok_or_else(|| anyhow!("unable to resolve guest: {}", uuid))?;
+            .ok_or_else(|| anyhow!("unable to resolve zone: {}", uuid))?;

@@ -307,7 +310,7 @@ impl Runtime {
-                    "failed to recall ip assignment for guest {}: {}",
+                    "failed to recall ip assignment for zone {}: {}",

@@ -316,7 +319,7 @@ impl Runtime {
-    pub async fn list(&self) -> Result<Vec<GuestInfo>> {
+    pub async fn list(&self) -> Result<Vec<ZoneInfo>> {

@@ -25,7 +25,7 @@ pub struct CpuTopologyInfo {
-fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
+fn labeled_topology(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {

@@ -140,9 +140,9 @@ impl PowerManagementContext {
-        let xentopo = self.context.xen.call.cpu_topology().await?;
+        let xen_topology = self.context.xen.call.cpu_topology().await?;
-        let logicaltopo = labelled_topo(&xentopo);
+        let logical_topology = labeled_topology(&xen_topology);
-        Ok(logicaltopo)
+        Ok(logical_topology)

@@ -13,9 +13,9 @@ async-trait = { workspace = true }
-krata-xencall = { path = "../xencall", version = "^0.0.12" }
+krata-xencall = { path = "../xencall", version = "^0.0.13" }
-krata-xenplatform = { path = "../xenplatform", version = "^0.0.12" }
+krata-xenplatform = { path = "../xenplatform", version = "^0.0.13" }
-krata-xenstore = { path = "../xenstore", version = "^0.0.12" }
+krata-xenstore = { path = "../xenstore", version = "^0.0.13" }

@@ -16,7 +16,7 @@ flate2 = { workspace = true }
-krata-xencall = { path = "../xencall", version = "^0.0.12" }
+krata-xencall = { path = "../xencall", version = "^0.0.13" }

@@ -1,6 +1,6 @@
-name = "krata-guest"
+name = "krata-zone"
-description = "Guest services for the krata isolation engine"
+description = "zone services for the krata isolation engine"

@@ -14,8 +14,8 @@ cgroups-rs = { workspace = true }
-krata = { path = "../krata", version = "^0.0.12" }
+krata = { path = "../krata", version = "^0.0.13" }
-krata-xenstore = { path = "../xen/xenstore", version = "^0.0.12" }
+krata-xenstore = { path = "../xen/xenstore", version = "^0.0.13" }

@@ -30,8 +30,8 @@ sysinfo = { workspace = true }
-name = "krataguest"
+name = "kratazone"
-name = "krataguest"
+name = "krata-zone"
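Before the new zone sources, a hedged sketch of the cgroup handling they rename (krata-guest-task becomes krata-zone-task in the init hunks further down); the cgroups-rs calls mirror the ones visible in that diff, while the helper itself is illustrative.

use anyhow::Result;
use cgroups_rs::{Cgroup, CgroupPid};

// Create the threaded zone-task cgroup and move the current process into it,
// matching the init_cgroup and fork steps shown below.
fn join_zone_task_cgroup() -> Result<Cgroup> {
    let hierarchy = cgroups_rs::hierarchies::auto();
    let cgroup = Cgroup::new(hierarchy, "krata-zone-task")?;
    cgroup.set_cgroup_type("threaded")?;
    cgroup.add_task(CgroupPid::from(std::process::id() as u64))?;
    Ok(cgroup)
}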
19
crates/zone/bin/init.rs
Normal file
19
crates/zone/bin/init.rs
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use env_logger::Env;
|
||||||
|
use kratazone::{death, init::ZoneInit};
|
||||||
|
use log::error;
|
||||||
|
use std::env;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<()> {
|
||||||
|
env::set_var("RUST_BACKTRACE", "1");
|
||||||
|
env_logger::Builder::from_env(Env::default().default_filter_or("warn")).init();
|
||||||
|
let mut zone = ZoneInit::new();
|
||||||
|
if let Err(error) = zone.init().await {
|
||||||
|
error!("failed to initialize zone: {}", error);
|
||||||
|
death(127).await?;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
death(1).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
@ -1,7 +1,7 @@
|
|||||||
use crate::{
|
use crate::{
|
||||||
childwait::{ChildEvent, ChildWait},
|
childwait::{ChildEvent, ChildWait},
|
||||||
death,
|
death,
|
||||||
exec::GuestExecTask,
|
exec::ZoneExecTask,
|
||||||
metrics::MetricsCollector,
|
metrics::MetricsCollector,
|
||||||
};
|
};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
@ -18,20 +18,16 @@ use log::debug;
|
|||||||
use nix::unistd::Pid;
|
use nix::unistd::Pid;
|
||||||
use tokio::{select, sync::broadcast};
|
use tokio::{select, sync::broadcast};
|
||||||
|
|
||||||
pub struct GuestBackground {
|
pub struct ZoneBackground {
|
||||||
idm: IdmInternalClient,
|
idm: IdmInternalClient,
|
||||||
child: Pid,
|
child: Pid,
|
||||||
_cgroup: Cgroup,
|
_cgroup: Cgroup,
|
||||||
wait: ChildWait,
|
wait: ChildWait,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestBackground {
|
impl ZoneBackground {
|
||||||
pub async fn new(
|
pub async fn new(idm: IdmInternalClient, cgroup: Cgroup, child: Pid) -> Result<ZoneBackground> {
|
||||||
idm: IdmInternalClient,
|
Ok(ZoneBackground {
|
||||||
cgroup: Cgroup,
|
|
||||||
child: Pid,
|
|
||||||
) -> Result<GuestBackground> {
|
|
||||||
Ok(GuestBackground {
|
|
||||||
idm,
|
idm,
|
||||||
child,
|
child,
|
||||||
_cgroup: cgroup,
|
_cgroup: cgroup,
|
||||||
@ -134,7 +130,7 @@ impl GuestBackground {
|
|||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
if let Some(RequestType::ExecStream(_)) = &handle.initial.request {
|
if let Some(RequestType::ExecStream(_)) = &handle.initial.request {
|
||||||
tokio::task::spawn(async move {
|
tokio::task::spawn(async move {
|
||||||
let exec = GuestExecTask { handle };
|
let exec = ZoneExecTask { handle };
|
||||||
if let Err(error) = exec.run().await {
|
if let Err(error) = exec.run().await {
|
||||||
let _ = exec
|
let _ = exec
|
||||||
.handle
|
.handle
|
@ -15,11 +15,11 @@ use tokio::{
|
|||||||
process::Command,
|
process::Command,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub struct GuestExecTask {
|
pub struct ZoneExecTask {
|
||||||
pub handle: IdmClientStreamResponseHandle<Request>,
|
pub handle: IdmClientStreamResponseHandle<Request>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestExecTask {
|
impl ZoneExecTask {
|
||||||
pub async fn run(&self) -> Result<()> {
|
pub async fn run(&self) -> Result<()> {
|
||||||
let mut receiver = self.handle.take().await?;
|
let mut receiver = self.handle.take().await?;
|
||||||
|
|
@ -26,7 +26,7 @@ use std::str::FromStr;
|
|||||||
use sys_mount::{FilesystemType, Mount, MountFlags};
|
use sys_mount::{FilesystemType, Mount, MountFlags};
|
||||||
use tokio::fs;
|
use tokio::fs;
|
||||||
|
|
||||||
use crate::background::GuestBackground;
|
use crate::background::ZoneBackground;
|
||||||
|
|
||||||
const IMAGE_BLOCK_DEVICE_PATH: &str = "/dev/xvda";
|
const IMAGE_BLOCK_DEVICE_PATH: &str = "/dev/xvda";
|
||||||
const CONFIG_BLOCK_DEVICE_PATH: &str = "/dev/xvdb";
|
const CONFIG_BLOCK_DEVICE_PATH: &str = "/dev/xvdb";
|
||||||
@ -57,17 +57,17 @@ const ADDONS_MODULES_PATH: &str = "/addons/modules";
|
|||||||
|
|
||||||
ioctl_write_int_bad!(set_controlling_terminal, TIOCSCTTY);
|
ioctl_write_int_bad!(set_controlling_terminal, TIOCSCTTY);
|
||||||
|
|
||||||
pub struct GuestInit {}
|
pub struct ZoneInit {}
|
||||||
|
|
-impl Default for GuestInit {
+impl Default for ZoneInit {
     fn default() -> Self {
         Self::new()
     }
 }
 
-impl GuestInit {
-    pub fn new() -> GuestInit {
-        GuestInit {}
+impl ZoneInit {
+    pub fn new() -> ZoneInit {
+        ZoneInit {}
     }
 
     pub async fn init(&mut self) -> Result<()> {
@@ -127,7 +127,7 @@ impl GuestInit {
         }
 
         if let Some(cfg) = config.config() {
-            trace!("running guest task");
+            trace!("running zone task");
             self.run(cfg, &launch, idm).await?;
         } else {
             return Err(anyhow!(
@@ -521,7 +521,7 @@ impl GuestInit {
 
         let mut env = HashMap::new();
         if let Some(config_env) = config.env() {
-            env.extend(GuestInit::env_map(config_env));
+            env.extend(ZoneInit::env_map(config_env));
         }
         env.extend(launch.env.clone());
         env.insert("KRATA_CONTAINER".to_string(), "1".to_string());
@@ -540,13 +540,13 @@ impl GuestInit {
             return Err(anyhow!("cannot get file name of command path as str"));
         };
         cmd.insert(0, file_name.to_string());
-        let env = GuestInit::env_list(env);
+        let env = ZoneInit::env_list(env);
 
-        trace!("running guest command: {}", cmd.join(" "));
+        trace!("running zone command: {}", cmd.join(" "));
 
         let path = CString::new(path.as_os_str().as_bytes())?;
-        let cmd = GuestInit::strings_as_cstrings(cmd)?;
-        let env = GuestInit::strings_as_cstrings(env)?;
+        let cmd = ZoneInit::strings_as_cstrings(cmd)?;
+        let env = ZoneInit::strings_as_cstrings(env)?;
         let mut working_dir = config
             .working_dir()
             .as_ref()
@@ -566,7 +566,7 @@ impl GuestInit {
     async fn init_cgroup(&self) -> Result<Cgroup> {
         trace!("initializing cgroup");
        let hierarchy = cgroups_rs::hierarchies::auto();
-        let cgroup = Cgroup::new(hierarchy, "krata-guest-task")?;
+        let cgroup = Cgroup::new(hierarchy, "krata-zone-task")?;
         cgroup.set_cgroup_type("threaded")?;
         trace!("initialized cgroup");
         Ok(cgroup)
@@ -619,7 +619,7 @@ impl GuestInit {
         cmd: Vec<CString>,
         env: Vec<CString>,
     ) -> Result<()> {
-        GuestInit::set_controlling_terminal()?;
+        ZoneInit::set_controlling_terminal()?;
         std::env::set_current_dir(working_dir)?;
         cgroup.add_task(CgroupPid::from(std::process::id() as u64))?;
         execve(&path, &cmd, &env)?;
@@ -640,7 +640,7 @@ impl GuestInit {
         cgroup: Cgroup,
         executed: Pid,
     ) -> Result<()> {
-        let mut background = GuestBackground::new(idm, cgroup, executed).await?;
+        let mut background = ZoneBackground::new(idm, cgroup, executed).await?;
         background.run().await?;
         Ok(())
     }
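
The hunks above are a mechanical identifier rename: GuestInit becomes ZoneInit, GuestBackground becomes ZoneBackground, and "guest" becomes "zone" in log strings and the cgroup name. As an illustration only (not the method actually used for this commit), a rename of this shape could be scripted roughly like this:

  # sketch of a bulk rename over Rust sources; review every match before applying anything like this
  grep -rl 'GuestInit\|GuestBackground' --include='*.rs' . \
    | xargs sed -i 's/GuestInit/ZoneInit/g; s/GuestBackground/ZoneBackground/g'
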
@@ -13,7 +13,7 @@ pub mod metrics;
 pub async fn death(code: c_int) -> Result<()> {
     let store = XsdClient::open().await?;
     store
-        .write_string("krata/guest/exit-code", &code.to_string())
+        .write_string("krata/zone/exit-code", &code.to_string())
         .await?;
     drop(store);
     loop {
@@ -14,7 +14,7 @@ impl MetricsCollector {
     pub fn collect(&self) -> Result<MetricNode> {
         let mut sysinfo = sysinfo::System::new();
         Ok(MetricNode::structural(
-            "guest",
+            "zone",
             vec![
                 self.collect_system(&mut sysinfo)?,
                 self.collect_processes(&mut sysinfo)?,
@@ -47,9 +47,6 @@ do
 elif [ "${FORM}" = "bundle-systemd" ]
 then
   asset "${SOURCE_FILE_PATH}" "target/assets/krata-systemd_${TAG_NAME}_${PLATFORM}.tgz"
-elif [ "${FORM}" = "os" ]
-then
-  asset "${SOURCE_FILE_PATH}" "target/assets/krata_${TAG_NAME}_${PLATFORM}.qcow2"
 else
   echo "ERROR: Unknown form '${FORM}'"
   exit 1
@@ -1,10 +1,28 @@
-#!/bin/sh
+#!/bin/bash
 set -e
 
-sudo apt-get update
-sudo apt-get install -y \
-  build-essential libssl-dev libelf-dev musl-dev \
-  flex bison bc protobuf-compiler musl-tools qemu-utils gcc-aarch64-linux-gnu
-
-sudo gem install --no-document fpm
-cargo install cross --git https://github.com/cross-rs/cross
+CROSS_RS_REV="7b79041c9278769eca57fae10c74741f5aa5c14b"
+FPM_VERSION="1.15.1"
+
+PACKAGES=(build-essential musl-dev protobuf-compiler musl-tools)
+
+sudo apt-get update
+
+if [ "${TARGET_ARCH}" = "aarch64" ]
+then
+  PACKAGES+=(gcc-aarch64-linux-gnu)
+fi
+
+sudo apt-get install -y "${PACKAGES[@]}"
+
+CROSS_COMPILE="$(./hack/build/cross-compile.sh)"
+
+if [ "${CROSS_COMPILE}" = "1" ]
+then
+  cargo install cross --git "https://github.com/cross-rs/cross.git" --rev "${CROSS_RS_REV}"
+fi
+
+if [ "${CI_NEEDS_FPM}" = "1" ]
+then
+  sudo gem install --no-document fpm -v "${FPM_VERSION}"
+fi
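
The reworked dependency script now keys off the environment instead of installing a fixed set of packages. As a rough usage sketch (the exact CI wiring is an assumption, not part of this diff), a workflow step might drive it like this:

  # hypothetical invocation of the reworked script from a CI job
  TARGET_ARCH="$(./hack/build/arch.sh)" \
  CI_NEEDS_FPM=1 \
    ./hack/ci/install-linux-deps.sh
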
@@ -5,4 +5,3 @@ REAL_SCRIPT="$(realpath "${0}")"
 cd "$(dirname "${REAL_SCRIPT}")/../.."
 
 find hack -type f -name '*.sh' -print0 | xargs -0 shellcheck -x
-find os/internal -type f -name '*.sh' -print0 | xargs -0 shellcheck -x
@@ -19,12 +19,12 @@ fi
 build_and_run() {
   EXE_TARGET="${1}"
   shift
-  sudo mkdir -p /var/lib/krata/guest
+  sudo mkdir -p /var/lib/krata/zone
   if [ "${KRATA_BUILD_INITRD}" = "1" ]
   then
     TARGET_ARCH="$(./hack/build/arch.sh)"
     ./hack/initrd/build.sh ${CARGO_BUILD_FLAGS}
-    sudo cp "target/initrd/initrd-${TARGET_ARCH}" "/var/lib/krata/guest/initrd"
+    sudo cp "target/initrd/initrd-${TARGET_ARCH}" "/var/lib/krata/zone/initrd"
   fi
   RUST_TARGET="$(./hack/build/target.sh)"
   ./hack/build/cargo.sh build ${CARGO_BUILD_FLAGS} --bin "${EXE_TARGET}"
hack/dist/systar.sh (6 changed lines, vendored)
@@ -38,9 +38,9 @@ else
   mv ../krata/kratad.service ../krata/kratanet.service usr/lib/systemd/system/
 fi
 
-mkdir -p usr/share/krata/guest
-mv ../krata/kernel ../krata/initrd usr/share/krata/guest
-mv ../krata/addons.squashfs usr/share/krata/guest/addons.squashfs
+mkdir -p usr/share/krata/zone
+mv ../krata/kernel ../krata/initrd usr/share/krata/zone
+mv ../krata/addons.squashfs usr/share/krata/zone/addons.squashfs
 
 tar czf "${SYSTAR}" --owner 0 --group 0 .
 
@@ -12,9 +12,9 @@ export TARGET_LIBC="musl"
 RUST_TARGET="$(./hack/build/target.sh)"
 export RUSTFLAGS="-Ctarget-feature=+crt-static"
 
-./hack/build/cargo.sh build "${@}" --release --bin krataguest
+./hack/build/cargo.sh build "${@}" --release --bin krata-zone
 INITRD_DIR="$(mktemp -d /tmp/krata-initrd.XXXXXXXXXXXXX)"
-cp "target/${RUST_TARGET}/release/krataguest" "${INITRD_DIR}/init"
+cp "target/${RUST_TARGET}/release/krata-zone" "${INITRD_DIR}/init"
 chmod +x "${INITRD_DIR}/init"
 cd "${INITRD_DIR}"
 mkdir -p "${KRATA_DIR}/target/initrd"
hack/os/build.sh (file deleted, 119 lines)
@@ -1,119 +0,0 @@
-#!/bin/sh
-set -e
-
-REAL_SCRIPT="$(realpath "${0}")"
-cd "$(dirname "${REAL_SCRIPT}")/../.."
-
-./hack/dist/apk.sh
-KRATA_VERSION="$(./hack/dist/version.sh)"
-TARGET_ARCH="$(./hack/build/arch.sh)"
-TARGET_ARCH_ALT="$(KRATA_ARCH_KERNEL_NAME=1 ./hack/build/arch.sh)"
-CROSS_COMPILE="$(./hack/build/cross-compile.sh)"
-
-TARGET_DIR="${PWD}/target"
-TARGET_OS_DIR="${TARGET_DIR}/os"
-mkdir -p "${TARGET_OS_DIR}"
-cp "${TARGET_DIR}/dist/krata_${KRATA_VERSION}_${TARGET_ARCH}.apk" "${TARGET_OS_DIR}/krata-${TARGET_ARCH}.apk"
-
-DOCKER_FLAGS="--platform linux/${TARGET_ARCH_ALT}"
-if [ -t 0 ]
-then
-  DOCKER_FLAGS="${DOCKER_FLAGS} -it"
-fi
-
-if [ "${CROSS_COMPILE}" = "1" ]
-then
-  docker run --privileged --rm tonistiigi/binfmt --install all
-fi
-
-ROOTFS="${TARGET_OS_DIR}/rootfs-${TARGET_ARCH}.tar"
-
-# shellcheck disable=SC2086
-docker run --rm --privileged -v "${PWD}:/mnt" ${DOCKER_FLAGS} alpine:latest "/mnt/os/internal/stage1.sh" "${TARGET_ARCH}"
-sudo chown "${USER}:${GROUP}" "${ROOTFS}"
-sudo modprobe nbd
-
-next_nbd_device() {
-  find /dev -maxdepth 2 -name 'nbd[0-9]*' | while read -r DEVICE
-  do
-    if [ "$(sudo blockdev --getsize64 "${DEVICE}")" = "0" ]
-    then
-      echo "${DEVICE}"
-      break
-    fi
-  done
-}
-
-NBD_DEVICE="$(next_nbd_device)"
-
-if [ -z "${NBD_DEVICE}" ]
-then
-  echo "ERROR: unable to allocate nbd device" > /dev/stderr
-  exit 1
-fi
-
-OS_IMAGE="${TARGET_OS_DIR}/krata-${TARGET_ARCH}.qcow2"
-EFI_PART="${NBD_DEVICE}p1"
-ROOT_PART="${NBD_DEVICE}p2"
-ROOT_DIR="${TARGET_OS_DIR}/root-${TARGET_ARCH}"
-EFI_DIR="${ROOT_DIR}/boot/efi"
-
-cleanup() {
-  trap '' EXIT HUP INT TERM
-  sudo umount -R "${ROOT_DIR}" > /dev/null 2>&1 || true
-  sudo umount "${EFI_PART}" > /dev/null 2>&1 || true
-  sudo umount "${ROOT_PART}" > /dev/null 2>&1 || true
-  sudo qemu-nbd --disconnect "${NBD_DEVICE}" > /dev/null 2>&1 || true
-  sudo rm -rf "${ROOT_DIR}"
-}
-
-rm -f "${OS_IMAGE}"
-qemu-img create -f qcow2 "${OS_IMAGE}" "2G"
-
-trap cleanup EXIT HUP INT TERM
-sudo qemu-nbd --connect="${NBD_DEVICE}" --cache=writeback -f qcow2 "${OS_IMAGE}"
-printf '%s\n' \
-  'label: gpt' \
-  'name=efi,type=U,size=128M,bootable' \
-  'name=system,type=L' | sudo sfdisk "${NBD_DEVICE}"
-sudo mkfs.fat -F32 -n EFI "${EFI_PART}"
-sudo mkfs.ext4 -L root -E discard "${ROOT_PART}"
-
-mkdir -p "${ROOT_DIR}"
-
-sudo mount -t ext4 "${ROOT_PART}" "${ROOT_DIR}"
-sudo mkdir -p "${EFI_DIR}"
-sudo mount -t vfat "${EFI_PART}" "${EFI_DIR}"
-
-sudo tar xf "${ROOTFS}" -C "${ROOT_DIR}"
-ROOT_UUID="$(sudo blkid "${ROOT_PART}" | sed -En 's/.*\bUUID="([^"]+)".*/\1/p')"
-EFI_UUID="$(sudo blkid "${EFI_PART}" | sed -En 's/.*\bUUID="([^"]+)".*/\1/p')"
-echo "${ROOT_UUID}"
-
-sudo mkdir -p "${ROOT_DIR}/proc" "${ROOT_DIR}/dev" "${ROOT_DIR}/sys"
-sudo mount -t proc none "${ROOT_DIR}/proc"
-sudo mount --bind /dev "${ROOT_DIR}/dev"
-sudo mount --make-private "${ROOT_DIR}/dev"
-sudo mount --bind /sys "${ROOT_DIR}/sys"
-sudo mount --make-private "${ROOT_DIR}/sys"
-
-sudo cp "${PWD}/os/internal/stage2.sh" "${ROOT_DIR}/stage2.sh"
-echo "${ROOT_UUID}" | sudo tee "${ROOT_DIR}/root-uuid" > /dev/null
-sudo mv "${ROOT_DIR}/etc/resolv.conf" "${ROOT_DIR}/etc/resolv.conf.orig"
-sudo cp "/etc/resolv.conf" "${ROOT_DIR}/etc/resolv.conf"
-sudo chroot "${ROOT_DIR}" /bin/sh -c "/stage2.sh ${TARGET_ARCH} ${TARGET_ARCH_ALT}"
-sudo mv "${ROOT_DIR}/etc/resolv.conf.orig" "${ROOT_DIR}/etc/resolv.conf"
-sudo rm -f "${ROOT_DIR}/stage2.sh"
-sudo rm -f "${ROOT_DIR}/root-uuid"
-
-{
-  echo "# krata fstab"
-  echo "UUID=${ROOT_UUID} / ext4 relatime 0 1"
-  echo "UUID=${EFI_UUID} / vfat rw,relatime,fmask=0133,codepage=437,iocharset=ascii,shortname=mixed,utf8,errors=remount-ro 0 2"
-} | sudo tee "${ROOT_DIR}/etc/fstab" > /dev/null
-
-cleanup
-
-OS_SMALL_IMAGE="${TARGET_OS_DIR}/krata-${TARGET_ARCH}.small.qcow2"
-qemu-img convert -O qcow2 "${OS_IMAGE}" "${OS_SMALL_IMAGE}"
-mv -f "${OS_SMALL_IMAGE}" "${OS_IMAGE}"
@@ -1,4 +1,4 @@
-FROM rust:1.79-alpine AS build
+FROM rust:1.79-alpine@sha256:a454f49f2e15e233f829a0fd9a7cbdac64b6f38ec08aeac227595d4fc6eb6d4d AS build
 RUN apk update && apk add protoc protobuf-dev build-base && rm -rf /var/cache/apk/*
 ENV TARGET_LIBC=musl TARGET_VENDOR=unknown
 
@@ -1,4 +1,4 @@
-FROM rust:1.79-alpine AS build
+FROM rust:1.79-alpine@sha256:a454f49f2e15e233f829a0fd9a7cbdac64b6f38ec08aeac227595d4fc6eb6d4d AS build
 RUN apk update && apk add protoc protobuf-dev build-base && rm -rf /var/cache/apk/*
 ENV TARGET_LIBC=musl TARGET_VENDOR=unknown
 
@@ -1,4 +1,4 @@
-FROM rust:1.79-alpine AS build
+FROM rust:1.79-alpine@sha256:a454f49f2e15e233f829a0fd9a7cbdac64b6f38ec08aeac227595d4fc6eb6d4d AS build
 RUN apk update && apk add protoc protobuf-dev build-base && rm -rf /var/cache/apk/*
 ENV TARGET_LIBC=musl TARGET_VENDOR=unknown
 
@@ -1,4 +1,4 @@
-FROM rust:1.79-alpine AS build
+FROM rust:1.79-alpine@sha256:a454f49f2e15e233f829a0fd9a7cbdac64b6f38ec08aeac227595d4fc6eb6d4d AS build
 RUN apk update && apk add protoc protobuf-dev build-base && rm -rf /var/cache/apk/*
 ENV TARGET_LIBC=musl TARGET_VENDOR=unknown
 
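
The FROM lines above keep the rust:1.79-alpine tag but additionally pin it to a content digest, so rebuilds resolve to exactly the same base image even if the tag is later re-pushed. A sketch of one way to look up the digest behind a tag (plain Docker CLI, not part of this diff):

  # resolve the digest a tag currently points at, then append it to the FROM line as tag@sha256:<digest>
  docker pull rust:1.79-alpine
  docker inspect --format '{{index .RepoDigests 0}}' rust:1.79-alpine
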
@@ -1,78 +0,0 @@
-#!/bin/sh
-set -e
-
-TARGET_ARCH="${1}"
-apk add --update-cache alpine-base \
-  linux-lts linux-firmware-none \
-  mkinitfs dosfstools e2fsprogs \
-  tzdata chrony
-
-apk add --allow-untrusted "/mnt/target/os/krata-${TARGET_ARCH}.apk"
-
-for SERVICE in kratad kratanet
-do
-  rc-update add "${SERVICE}" default
-done
-
-apk add xen xen-hypervisor
-
-rc-update add xenstored default
-
-for MODULE in xen-netblock xen-blkback tun tap
-do
-  echo "${MODULE}" >> /etc/modules
-done
-
-cat > /etc/network/interfaces <<-EOF
-auto eth0
-iface eth0 inet dhcp
-EOF
-
-for SERVICE in networking chronyd
-do
-  rc-update add "${SERVICE}" default
-done
-
-for SERVICE in devfs dmesg mdev hwdrivers cgroups
-do
-  rc-update add "${SERVICE}" sysinit
-done
-
-for SERVICE in modules hwclock swap hostname sysctl bootmisc syslog seedrng
-do
-  rc-update add "${SERVICE}" boot
-done
-
-for SERVICE in killprocs savecache mount-ro
-do
-  rc-update add "${SERVICE}" shutdown
-done
-
-echo 'root:krata' | chpasswd
-echo 'krata' > /etc/hostname
-
-{
-  echo '# krata resolver configuration'
-  echo 'nameserver 1.1.1.1'
-  echo 'nameserver 1.0.0.1'
-  echo 'nameserver 2606:4700:4700::1111'
-  echo 'nameserver 2606:4700:4700::1001'
-} > /etc/resolv.conf
-
-{
-  echo 'Welcome to krataOS!'
-  echo 'You may now login to the console to manage krata.'
-} > /etc/issue
-
-echo > /etc/motd
-
-ln -s /usr/share/zoneinfo/UTC /etc/localtime
-
-rm -rf /var/cache/apk/*
-rm -rf /.dockerenv
-
-cd /
-rm -f "/mnt/target/os/rootfs-${TARGET_ARCH}.tar"
-tar cf "/mnt/target/os/rootfs-${TARGET_ARCH}.tar" --numeric-owner \
-  --exclude 'mnt/**' --exclude 'proc/**' \
-  --exclude 'sys/**' --exclude 'dev/**' .
@@ -1,32 +0,0 @@
-#!/bin/sh
-set -e
-
-TARGET_ARCH="${1}"
-TARGET_ARCH_ALT="${2}"
-apk add --update-cache grub-efi
-grub-install --target="${TARGET_ARCH_ALT}-efi" --efi-directory=/boot/efi --no-nvram --skip-fs-probe --bootloader-id=BOOT
-
-FROM_EFI_FILE="grubx64.efi"
-TO_EFI_FILE="BOOTX64.efi"
-if [ "${TARGET_ARCH}" = "aarch64" ]
-then
-  FROM_EFI_FILE="grubaa64.efi"
-  TO_EFI_FILE="BOOTA64.efi"
-fi
-
-mv "/boot/efi/EFI/BOOT/${FROM_EFI_FILE}" "/boot/efi/EFI/BOOT/${TO_EFI_FILE}"
-
-ROOT_UUID="$(cat /root-uuid)"
-
-{
-  echo 'GRUB_CMDLINE_XEN_DEFAULT="dom0_mem=1024M,max:1024M"'
-  echo "GRUB_CMDLINE_LINUX_DEFAULT=\"quiet rootfstype=ext4 root=UUID=${ROOT_UUID} modules=ext4\""
-  echo 'GRUB_DEFAULT="saved"'
-  echo 'GRUB_SAVEDEFAULT="true"'
-} >> /etc/default/grub
-
-# fix bug in grub detection of xen support
-cp /boot/config-*-lts /boot/config-lts
-grub-mkconfig -o /boot/grub/grub.cfg
-grub-set-default "$(grep ^menuentry /boot/grub/grub.cfg | grep Xen | cut -d \' -f 2 | head -1)"
-rm -rf /var/cache/apk/*
@@ -14,7 +14,7 @@ changelog_path = "./CHANGELOG.md"
 changelog_include = [
     "krata-daemon",
     "krata-ctl",
-    "krata-guest",
+    "krata-zone",
     "krata-network",
     "krata-runtime",
     "krata-oci",
@@ -30,9 +30,9 @@ chmod +x /usr/sbin/kratad
 chmod +x /usr/sbin/kratanet
 chmod +x /usr/bin/kratactl
 
-mkdir -p /var/lib/krata /usr/share/krata/guest
-cp kernel /usr/share/krata/guest/kernel
-cp initrd /usr/share/krata/guest/initrd
+mkdir -p /var/lib/krata /usr/share/krata/zone
+cp kernel /usr/share/krata/zone/kernel
+cp initrd /usr/share/krata/zone/initrd
 
 systemctl daemon-reload
 systemctl enable kratad.service kratanet.service