Mirror of https://github.com/edera-dev/krata.git
Synced 2025-08-03 21:21:32 +00:00
Compare commits
62 Commits
Commit SHA1s:
218f848170, 9d8c516a29, 89055ef77c, 24c71e9725, 0a6a112133, 1627cbcdd7, f8247f13e4, 6d07112e3d,
6cef03bffa, 73fd95dbe2, f41a1e2168, 346cf4a7fa, 5e16f3149f, ec9060d872, 6050e99aa7, 7cfdb27d23,
87c4d7b0c3, 4f84dfa3c7, dedc514944, 9c0597157b, 3f8c9e7a7c, 6ed31bc436, 2526065d74, a509f69398,
d021fb2147, d33079c7c5, bea6d1e6fe, bc27eeb286, 42bb289421, f2ab03711e, 2f3daad80a, 8f7e47a218,
19683b80c1, ae486e347f, 9f4db08e07, d674a69c17, f59976eb80, 3641bc55f4, 4b6272f49d, 7c55e63f24,
2083ec0604, 5ad2e40a7b, 0fd6318c5f, 7940eea588, 45aa4914bb, 8dd3cc7692, 6a2f1e6517, d433cd49e2,
0fd32e84cd, ab8941326a, 8be75a722e, e6f35eb3d0, 58c6413ca2, e1f1f8579c, 6bf1d3f88c, 377b837db9,
6cd8cc12db, c68f367e4a, 15d5ed5a45, 6d6bdade87, 693d62a41a, 8ec7042ea4
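For anyone working from a local clone of this mirror, the same range can be inspected with plain git. The endpoints below are simply the first and last SHAs in the list above; which end is the base and which is the head is an assumption, so swap the two if the log comes up empty:

```sh
# List the commits contained in this compare, one per line
git log --oneline 8ec7042ea4..218f848170

# Summarize the file-level changes across the range
git diff --stat 8ec7042ea4..218f848170
```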
.github/workflows/check.yml (vendored, 12 changed lines)

@@ -1,11 +1,19 @@
name: check
on: [push, pull_request]
on:
pull_request:
branches:
- main
merge_group:
branches:
- main
jobs:
fmt:
name: fmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt

@@ -16,4 +24,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- run: ./hack/code/shellcheck.sh
.github/workflows/client.yml (vendored, 14 changed lines)

@@ -1,5 +1,11 @@
name: client
on: [push, pull_request]
on:
pull_request:
branches:
- main
merge_group:
branches:
- main
jobs:
build:
strategy:

@@ -16,12 +22,16 @@
TARGET_OS: "${{ matrix.platform.os }}"
TARGET_ARCH: "${{ matrix.platform.arch }}"
runs-on: "${{ matrix.platform.on }}"
name: build ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
name: client build ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
defaults:
run:
shell: bash
steps:
- run: git config --global core.autocrlf false && git config --global core.eol lf
if: ${{ matrix.platform.os == 'windows' }}
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
if: ${{ matrix.platform.os != 'darwin' }}
- uses: dtolnay/rust-toolchain@stable
.github/workflows/kernel.yml (vendored, 14 changed lines)

@@ -1,26 +1,30 @@
name: kernel
on:
push:
paths:
- "kernel/**"
- "hack/ci/**"
pull_request:
branches:
- main
paths:
- "kernel/**"
- "hack/ci/**"
merge_group:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: build ${{ matrix.arch }}
name: kernel build ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/kernel/build.sh
.github/workflows/nightly.yml (vendored, 18 changed lines)

@@ -14,9 +14,11 @@ jobs:
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: server ${{ matrix.arch }}
name: nightly server ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"

@@ -68,12 +70,16 @@
TARGET_OS: "${{ matrix.platform.os }}"
TARGET_ARCH: "${{ matrix.platform.arch }}"
runs-on: "${{ matrix.platform.on }}"
name: client ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
name: nightly client ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
defaults:
run:
shell: bash
steps:
- run: git config --global core.autocrlf false && git config --global core.eol lf
if: ${{ matrix.platform.os == 'windows' }}
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
if: ${{ matrix.platform.os != 'darwin' }}
- uses: dtolnay/rust-toolchain@stable

@@ -87,4 +93,10 @@
- uses: actions/upload-artifact@v4
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl*"
path: "target/*/release/kratactl"
if: ${{ matrix.platform.os != 'windows' }}
- uses: actions/upload-artifact@v4
with:
name: kratactl-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
path: "target/*/release/kratactl.exe"
if: ${{ matrix.platform.os == 'windows' }}
.github/workflows/os.yml (vendored, 15 changed lines)

@@ -1,28 +1,31 @@
name: os
on:
push:
paths:
- "os/**"
- "hack/os/**"
- "hack/ci/**"
pull_request:
branches:
- main
paths:
- "os/**"
- "hack/os/**"
- "hack/ci/**"
merge_group:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: build ${{ matrix.arch }}
name: os build ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"
.github/workflows/release-binaries.yml (vendored, new file, 94 lines)

@@ -0,0 +1,94 @@
name: release-binaries
permissions:
contents: write
on:
release:
types:
- published
env:
CARGO_INCREMENTAL: 0
CARGO_NET_GIT_FETCH_WITH_CLI: true
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
RUSTUP_MAX_RETRIES: 10
jobs:
server:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: release-binaries server ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/dist/bundle.sh
env:
KRATA_KERNEL_BUILD_JOBS: "5"
- run: "./hack/ci/assemble-release-assets.sh bundle-systemd ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/bundle-systemd-${{ matrix.arch }}.tgz"
- run: ./hack/dist/deb.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- run: "./hack/ci/assemble-release-assets.sh debian ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/*.deb"
- run: ./hack/dist/apk.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- run: "./hack/ci/assemble-release-assets.sh alpine ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/dist/*_${{ matrix.arch }}.apk"
- run: ./hack/os/build.sh
env:
KRATA_KERNEL_BUILD_SKIP: "1"
- run: "./hack/ci/assemble-release-assets.sh os ${{ github.event.release.tag_name }} ${{ matrix.arch }} target/os/krata-${{ matrix.arch }}.qcow2"
- run: "./hack/ci/upload-release-assets.sh ${{ github.event.release.tag_name }}"
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
client:
strategy:
fail-fast: false
matrix:
platform:
- { os: linux, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: linux, arch: aarch64, on: ubuntu-latest, deps: linux }
- { os: darwin, arch: x86_64, on: macos-14, deps: darwin }
- { os: darwin, arch: aarch64, on: macos-14, deps: darwin }
- { os: freebsd, arch: x86_64, on: ubuntu-latest, deps: linux }
- { os: windows, arch: x86_64, on: windows-latest, deps: windows }
env:
TARGET_OS: "${{ matrix.platform.os }}"
TARGET_ARCH: "${{ matrix.platform.arch }}"
runs-on: "${{ matrix.platform.on }}"
name: release-binaries client ${{ matrix.platform.os }}-${{ matrix.platform.arch }}
defaults:
run:
shell: bash
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
if: ${{ matrix.platform.os != 'darwin' }}
- uses: dtolnay/rust-toolchain@stable
with:
targets: "${{ matrix.platform.arch }}-apple-darwin"
if: ${{ matrix.platform.os == 'darwin' }}
- uses: homebrew/actions/setup-homebrew@master
if: ${{ matrix.platform.os == 'darwin' }}
- run: ./hack/ci/install-${{ matrix.platform.deps }}-deps.sh
- run: ./hack/build/cargo.sh build --release --bin kratactl
- run: "./hack/ci/assemble-release-assets.sh kratactl ${{ github.event.release.tag_name }} ${{ matrix.platform.os }}-${{ matrix.platform.arch }} target/*/release/kratactl"
if: ${{ matrix.platform.os != 'windows' }}
- run: "./hack/ci/assemble-release-assets.sh kratactl ${{ github.event.release.tag_name }} ${{ matrix.platform.os }}-${{ matrix.platform.arch }} target/*/release/kratactl.exe"
if: ${{ matrix.platform.os == 'windows' }}
- run: "./hack/ci/upload-release-assets.sh ${{ github.event.release.tag_name }}"
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/release-plz.yml (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
name: release-plz
permissions:
pull-requests: write
contents: write
on:
push:
branches:
- main
concurrency:
group: "${{ github.workflow }}"
cancel-in-progress: true
jobs:
release-plz:
name: release-plz
runs-on: ubuntu-latest
steps:
- uses: actions/create-github-app-token@v1
id: generate-token
with:
app-id: "${{ secrets.EDERA_CULTIVATION_APP_ID }}"
private-key: "${{ secrets.EDERA_CULTIVATION_APP_PRIVATE_KEY }}"
- uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
token: "${{ steps.generate-token.outputs.token }}"
- uses: dtolnay/rust-toolchain@stable
- run: ./hack/ci/install-linux-deps.sh
- name: release-plz
uses: MarcoIeni/release-plz-action@v0.5
env:
GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}"
CARGO_REGISTRY_TOKEN: "${{ secrets.KRATA_RELEASE_CARGO_TOKEN }}"
.github/workflows/server.yml (vendored, 25 changed lines)

@@ -1,18 +1,27 @@
name: server
on: [push, pull_request]
on:
pull_request:
branches:
- main
merge_group:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: build ${{ matrix.arch }}
name: server build ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/build/cargo.sh build

@@ -25,9 +34,11 @@ jobs:
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: test ${{ matrix.arch }}
name: server test ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
- run: ./hack/ci/install-linux-deps.sh
- run: ./hack/build/cargo.sh test

@@ -40,9 +51,11 @@ jobs:
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: clippy ${{ matrix.arch }}
name: server clippy ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
with:
components: clippy

@@ -57,9 +70,11 @@ jobs:
- aarch64
env:
TARGET_ARCH: "${{ matrix.arch }}"
name: initrd ${{ matrix.arch }}
name: server initrd ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
with:
targets: "${{ matrix.arch }}-unknown-linux-gnu,${{ matrix.arch }}-unknown-linux-musl"
.gitignore (vendored, 1 changed line)

@@ -1,2 +1 @@
/target
Cargo.lock
.gitmodules (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
[submodule "vendor"]
path = vendor
url = https://github.com/edera-dev/krata-vendor.git
CHANGELOG.md (new file, 64 lines)

@@ -0,0 +1,64 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [0.0.9](https://github.com/edera-dev/krata/compare/v0.0.8...v0.0.9) - 2024-04-15

### Added
- oci compliance work ([#85](https://github.com/edera-dev/krata/pull/85))
- oci packer can now use mksquashfs if available ([#70](https://github.com/edera-dev/krata/pull/70))
- basic kratactl top command ([#72](https://github.com/edera-dev/krata/pull/72))
- idm snooping ([#71](https://github.com/edera-dev/krata/pull/71))
- implement oci image progress ([#64](https://github.com/edera-dev/krata/pull/64))
- guest metrics support ([#46](https://github.com/edera-dev/krata/pull/46))

### Other
- init: default to xterm if TERM is not set ([#52](https://github.com/edera-dev/krata/pull/52))
- update Cargo.toml dependencies

## [0.0.8](https://github.com/edera-dev/krata/compare/v0.0.7...v0.0.8) - 2024-04-09

### Other
- update Cargo.lock dependencies

## [0.0.7](https://github.com/edera-dev/krata/compare/v0.0.6...v0.0.7) - 2024-04-09

### Other
- update Cargo.toml dependencies
- update Cargo.lock dependencies

## [0.0.6](https://github.com/edera-dev/krata/compare/v0.0.5...v0.0.6) - 2024-04-09

### Fixed
- increase channel acquisition timeout to support lower performance hosts ([#36](https://github.com/edera-dev/krata/pull/36))

### Other
- update Cargo.toml dependencies
- update Cargo.lock dependencies

## [0.0.5](https://github.com/edera-dev/krata/compare/v0.0.4...v0.0.5) - 2024-04-09

### Added
- *(ctl)* add help and about to commands and arguments ([#25](https://github.com/edera-dev/krata/pull/25))

### Other
- update Cargo.toml dependencies
- update Cargo.lock dependencies

## [0.0.4](https://github.com/edera-dev/krata/releases/tag/v${version}) - 2024-04-03

### Other
- implement automatic releases
- reimplement console to utilize channels, and provide logs support
- set hostname from launch config
- implement event stream retries
- work on parallel reconciliation
- implement parallel guest reconciliation
- log when a guest start failures occurs
- remove device restriction
- setup loopback interface
- place running tasks in cgroup
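Several of the 0.0.9 entries above correspond to new kratactl subcommands that appear in the crates/ctl diffs later in this compare. A minimal usage sketch, with subcommand and flag spellings taken from those diffs (the guest name is hypothetical):

```sh
# Live dashboard of running guests (added in #72)
kratactl top

# Watch traffic on the IDM bus as JSON lines (added in #71)
kratactl idm-snoop --format jsonl

# Read metrics for a single guest (added in #46)
kratactl metrics my-guest
```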
Cargo.lock (generated, new file, 3599 lines): file diff suppressed because it is too large.
Cargo.toml (35 changed lines)

@@ -16,7 +16,7 @@ members = [
resolver = "2"

[workspace.package]
version = "0.0.3"
version = "0.0.9"
homepage = "https://krata.dev"
license = "Apache-2.0"
repository = "https://github.com/edera-dev/krata"

@@ -24,20 +24,26 @@ repository = "https://github.com/edera-dev/krata"
[workspace.dependencies]
anyhow = "1.0"
arrayvec = "0.7.4"
async-compression = "0.4.6"
async-compression = "0.4.8"
async-stream = "0.3.5"
async-trait = "0.1.77"
async-trait = "0.1.80"
backhand = "0.15.0"
byteorder = "1"
bytes = "1.5.0"
cli-tables = "0.2.1"
cgroups-rs = "0.3.4"
circular-buffer = "0.1.7"
comfy-table = "7.1.1"
crossterm = "0.27.0"
ctrlc = "3.4.4"
elf = "0.7.4"
env_logger = "0.11.0"
etherparse = "0.14.2"
etherparse = "0.14.3"
fancy-duration = "0.9.2"
flate2 = "1.0"
futures = "0.3.30"
human_bytes = "0.4"
indexmap = "2.2.6"
indicatif = "0.17.8"
ipnetwork = "0.20.0"
libc = "0.2"
log = "0.4.20"

@@ -50,20 +56,25 @@ oci-spec = "0.6.4"
once_cell = "1.19.0"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
prost = "0.12.3"
prost-build = "0.12.3"
prost = "0.12.4"
prost-build = "0.12.4"
prost-reflect-build = "0.13.0"
prost-types = "0.12.4"
rand = "0.8.5"
ratatui = "0.26.1"
redb = "2.0.0"
rtnetlink = "0.14.1"
scopeguard = "1.2.0"
serde_json = "1.0.113"
serde_yaml = "0.9"
sha256 = "1.5.0"
signal-hook = "0.3.17"
slice-copy = "0.3.0"
smoltcp = "0.11.0"
sysinfo = "0.30.10"
termtree = "0.4.1"
thiserror = "1.0"
tokio-tun = "0.11.2"
tokio-tun = "0.11.4"
tonic-build = "0.11.0"
tower = "0.4.13"
udp-stream = "0.0.11"

@@ -76,11 +87,11 @@ version = "4.4.18"
features = ["derive"]

[workspace.dependencies.prost-reflect]
version = "0.13.0"
version = "0.13.1"
features = ["derive"]

[workspace.dependencies.reqwest]
version = "0.12.0"
version = "0.12.3"
default-features = false
features = ["rustls-tls"]

@@ -107,3 +118,7 @@ features = ["tls"]
[workspace.dependencies.uuid]
version = "1.6.1"
features = ["v4"]

[profile.release]
lto = "fat"
strip = "symbols"
DEV.md (15 changed lines)

@@ -28,10 +28,21 @@ it's corresponding code path from the above table.

1. Install the specified Debian version on a x86_64 host _capable_ of KVM (NOTE: KVM is not used, Xen is a type-1 hypervisor).

2. Install required packages: `apt install git xen-system-amd64 flex bison libelf-dev libssl-dev bc`
2. Install required packages:

```sh
$ apt install git xen-system-amd64 build-essential libclang-dev musl-tools flex bison libelf-dev libssl-dev bc protobuf-compiler libprotobuf-dev squashfs-tools erofs-utils
```

3. Install [rustup](https://rustup.rs) for managing a Rust environment.

Make sure to install the targets that you need for krata:

```sh
$ rustup target add x86_64-unknown-linux-gnu
$ rustup target add x86_64-unknown-linux-musl
```

4. Configure `/etc/default/grub.d/xen.cfg` to give krata guests some room:

```sh

@@ -43,7 +54,7 @@ After changing the grub config, update grub: `update-grub`

Then reboot to boot the system as a Xen dom0.

You can validate that Xen is setup by running `xl info` and ensuring it returns useful information about the Xen hypervisor.
You can validate that Xen is setup by running `dmesg | grep "Hypervisor detected"` and ensuring it returns a line like `Hypervisor detected: Xen PV`, if that is missing, the host is not running under Xen.

5. Clone the krata source code:
```sh
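The dom0 validation steps described in the DEV.md hunks above can be run back to back; the commands below are taken directly from that text and assume nothing beyond a root shell on the Debian host:

```sh
# Apply the grub configuration changes and reboot into the Xen dom0
update-grub
reboot

# After the reboot, confirm the host is running as a Xen PV dom0
dmesg | grep "Hypervisor detected"
# Expected: a line like "Hypervisor detected: Xen PV"; if it is missing, the host is not under Xen.
```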
FAQ.md (2 changed lines)

@@ -2,7 +2,7 @@

## How does krata currently work?

The krata hypervisor makes it possible to launch OCI containers on a Xen hypervisor without utilizing the Xen userspace tooling. krata contains just enough of the userspace of Xen (reimplemented in Rust) to start an x86_64 Xen Linux PV guest, and implements a Linux init process that can boot an OCI container. It does so by converting an OCI image into a squashfs file and packaging basic startup data in a bundle which the init container can read.
The krata hypervisor makes it possible to launch OCI containers on a Xen hypervisor without utilizing the Xen userspace tooling. krata contains just enough of the userspace of Xen (reimplemented in Rust) to start an x86_64 Xen Linux PV guest, and implements a Linux init process that can boot an OCI container. It does so by converting an OCI image into a squashfs/erofs file and packaging basic startup data in a bundle which the init container can read.

In addition, due to the desire to reduce dependence on the dom0 network, krata contains a networking daemon called kratanet. kratanet listens for krata guests to startup and launches a userspace networking environment. krata guests can access the dom0 networking stack via the proxynat layer that makes it possible to communicate over UDP, TCP, and ICMP (echo only) to the outside world. In addition, each krata guest is provided a "gateway" IP (both in IPv4 and IPv6) which utilizes smoltcp to provide a virtual host. That virtual host in the future could dial connections into the container to access container networking resources.
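The squashfs/erofs distinction mentioned above is user-selectable in the CLI added elsewhere in this compare. A short sketch, assuming a hypothetical alpine:latest image and using the flag spellings from the kratactl pull and launch diffs below:

```sh
# Pack the image as erofs instead of the default squashfs when pulling it into the cache
kratactl pull --image-format erofs alpine:latest

# Launch a guest from an OCI image, choosing the erofs packer output
kratactl launch --image-format erofs --name demo alpine:latest
```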
README.md (11 changed lines)

@@ -16,4 +16,13 @@ The Edera Hypervisor

## Introduction

krata is a single-host hypervisor service built primarily for OCI containers.
krata is a single-host hypervisor service built for OCI-compliant containers. It isolates containers using a type-1 hypervisor, providing workload isolation that can exceed the security level of KVM-based OCI-compliant runtimes.

krata utilizes the core of the Xen hypervisor, with a fully memory-safe Rust control plane to bring Xen tooling into a new secure era.

## Hardware Support

| Architecture | Completion Level | Virtualization Technology |
| ------------ | ---------------- | ------------------------- |
| x86_64 | 100% Completed | Intel VT-x, AMD-V |
| aarch64 | 30% Completed | AArch64 virtualization |
@@ -2,7 +2,7 @@
name = "krata-ctl"
description = "Command-line tool to control the krata hypervisor"
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"

@@ -12,15 +12,21 @@ resolver = "2"
anyhow = { workspace = true }
async-stream = { workspace = true }
clap = { workspace = true }
cli-tables = { workspace = true }
crossterm = { workspace = true }
comfy-table = { workspace = true }
crossterm = { workspace = true, features = ["event-stream"] }
ctrlc = { workspace = true, features = ["termination"] }
env_logger = { workspace = true }
krata = { path = "../krata", version = "^0.0.3" }
fancy-duration = { workspace = true }
human_bytes = { workspace = true }
indicatif = { workspace = true }
krata = { path = "../krata", version = "^0.0.9" }
log = { workspace = true }
prost-reflect = { workspace = true, features = ["serde"] }
prost-types = { workspace = true }
ratatui = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
termtree = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
tonic = { workspace = true }
@@ -10,8 +10,9 @@ use crate::console::StdioConsoleStream;
use super::resolve_guest;

#[derive(Parser)]
#[command(about = "Attach to the guest console")]
pub struct AttachCommand {
#[arg()]
#[arg(help = "Guest to attach to, either the name or the uuid")]
guest: String,
}
@ -17,10 +17,15 @@ use tonic::{transport::Channel, Request};
|
||||
use crate::cli::resolve_guest;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Destroy a guest")]
|
||||
pub struct DestroyCommand {
|
||||
#[arg(short = 'W', long)]
|
||||
#[arg(
|
||||
short = 'W',
|
||||
long,
|
||||
help = "Wait for the destruction of the guest to complete"
|
||||
)]
|
||||
wait: bool,
|
||||
#[arg()]
|
||||
#[arg(help = "Guest to destroy, either the name or the uuid")]
|
||||
guest: String,
|
||||
}
|
||||
|
||||
@ -47,34 +52,31 @@ impl DestroyCommand {
|
||||
async fn wait_guest_destroyed(id: &str, events: EventStream) -> Result<()> {
|
||||
let mut stream = events.subscribe();
|
||||
while let Ok(event) = stream.recv().await {
|
||||
match event {
|
||||
Event::GuestChanged(changed) => {
|
||||
let Some(guest) = changed.guest else {
|
||||
continue;
|
||||
};
|
||||
let Event::GuestChanged(changed) = event;
|
||||
let Some(guest) = changed.guest else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if guest.id != id {
|
||||
continue;
|
||||
}
|
||||
if guest.id != id {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Some(state) = guest.state else {
|
||||
continue;
|
||||
};
|
||||
let Some(state) = guest.state else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if let Some(ref error) = state.error_info {
|
||||
if state.status() == GuestStatus::Failed {
|
||||
error!("destroy failed: {}", error.message);
|
||||
std::process::exit(1);
|
||||
} else {
|
||||
error!("guest error: {}", error.message);
|
||||
}
|
||||
}
|
||||
|
||||
if state.status() == GuestStatus::Destroyed {
|
||||
std::process::exit(0);
|
||||
}
|
||||
if let Some(ref error) = state.error_info {
|
||||
if state.status() == GuestStatus::Failed {
|
||||
error!("destroy failed: {}", error.message);
|
||||
std::process::exit(1);
|
||||
} else {
|
||||
error!("guest error: {}", error.message);
|
||||
}
|
||||
}
|
||||
|
||||
if state.status() == GuestStatus::Destroyed {
|
||||
std::process::exit(0);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
74
crates/ctl/src/cli/idm_snoop.rs
Normal file
74
crates/ctl/src/cli/idm_snoop.rs
Normal file
@ -0,0 +1,74 @@
|
||||
use anyhow::Result;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use krata::{
|
||||
events::EventStream,
|
||||
v1::control::{control_service_client::ControlServiceClient, SnoopIdmReply, SnoopIdmRequest},
|
||||
};
|
||||
|
||||
use tokio_stream::StreamExt;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::format::{kv2line, proto2dynamic, proto2kv};
|
||||
|
||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||
enum IdmSnoopFormat {
|
||||
Simple,
|
||||
Jsonl,
|
||||
KeyValue,
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Snoop on the IDM bus")]
|
||||
pub struct IdmSnoopCommand {
|
||||
#[arg(short, long, default_value = "simple", help = "Output format")]
|
||||
format: IdmSnoopFormat,
|
||||
}
|
||||
|
||||
impl IdmSnoopCommand {
|
||||
pub async fn run(
|
||||
self,
|
||||
mut client: ControlServiceClient<Channel>,
|
||||
_events: EventStream,
|
||||
) -> Result<()> {
|
||||
let mut stream = client.snoop_idm(SnoopIdmRequest {}).await?.into_inner();
|
||||
|
||||
while let Some(reply) = stream.next().await {
|
||||
let reply = reply?;
|
||||
match self.format {
|
||||
IdmSnoopFormat::Simple => {
|
||||
self.print_simple(reply)?;
|
||||
}
|
||||
|
||||
IdmSnoopFormat::Jsonl => {
|
||||
let value = serde_json::to_value(proto2dynamic(reply)?)?;
|
||||
let encoded = serde_json::to_string(&value)?;
|
||||
println!("{}", encoded.trim());
|
||||
}
|
||||
|
||||
IdmSnoopFormat::KeyValue => {
|
||||
self.print_key_value(reply)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn print_simple(&self, reply: SnoopIdmReply) -> Result<()> {
|
||||
let from = reply.from;
|
||||
let to = reply.to;
|
||||
let Some(packet) = reply.packet else {
|
||||
return Ok(());
|
||||
};
|
||||
let value = serde_json::to_value(proto2dynamic(packet)?)?;
|
||||
let encoded = serde_json::to_string(&value)?;
|
||||
println!("({} -> {}) {}", from, to, encoded);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn print_key_value(&self, reply: SnoopIdmReply) -> Result<()> {
|
||||
let kvs = proto2kv(reply)?;
|
||||
println!("{}", kv2line(kvs));
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -6,12 +6,12 @@ use krata::{
|
||||
events::EventStream,
|
||||
v1::{
|
||||
common::{
|
||||
guest_image_spec::Image, GuestImageSpec, GuestOciImageSpec, GuestSpec, GuestStatus,
|
||||
GuestTaskSpec, GuestTaskSpecEnvVar,
|
||||
guest_image_spec::Image, GuestImageSpec, GuestOciImageFormat, GuestOciImageSpec,
|
||||
GuestSpec, GuestStatus, GuestTaskSpec, GuestTaskSpecEnvVar,
|
||||
},
|
||||
control::{
|
||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||
CreateGuestRequest,
|
||||
CreateGuestRequest, PullImageRequest,
|
||||
},
|
||||
},
|
||||
};
|
||||
@ -19,26 +19,53 @@ use log::error;
|
||||
use tokio::select;
|
||||
use tonic::{transport::Channel, Request};
|
||||
|
||||
use crate::console::StdioConsoleStream;
|
||||
use crate::{console::StdioConsoleStream, pull::pull_interactive_progress};
|
||||
|
||||
use super::pull::PullImageFormat;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Launch a new guest")]
|
||||
pub struct LauchCommand {
|
||||
#[arg(short, long)]
|
||||
#[arg(short = 'S', long, default_value = "squashfs", help = "Image format")]
|
||||
image_format: PullImageFormat,
|
||||
#[arg(short, long, help = "Name of the guest")]
|
||||
name: Option<String>,
|
||||
#[arg(short, long, default_value_t = 1)]
|
||||
#[arg(
|
||||
short,
|
||||
long,
|
||||
default_value_t = 1,
|
||||
help = "vCPUs available to the guest"
|
||||
)]
|
||||
cpus: u32,
|
||||
#[arg(short, long, default_value_t = 512)]
|
||||
#[arg(
|
||||
short,
|
||||
long,
|
||||
default_value_t = 512,
|
||||
help = "Memory available to the guest, in megabytes"
|
||||
)]
|
||||
mem: u64,
|
||||
#[arg[short, long]]
|
||||
#[arg[short, long, help = "Environment variables set in the guest"]]
|
||||
env: Option<Vec<String>>,
|
||||
#[arg(short, long)]
|
||||
#[arg(
|
||||
short,
|
||||
long,
|
||||
help = "Attach to the guest after guest starts, implies --wait"
|
||||
)]
|
||||
attach: bool,
|
||||
#[arg(short = 'W', long)]
|
||||
#[arg(
|
||||
short = 'W',
|
||||
long,
|
||||
help = "Wait for the guest to start, implied by --attach"
|
||||
)]
|
||||
wait: bool,
|
||||
#[arg()]
|
||||
#[arg(help = "Container image for guest to use")]
|
||||
oci: String,
|
||||
#[arg(allow_hyphen_values = true, trailing_var_arg = true)]
|
||||
run: Vec<String>,
|
||||
#[arg(
|
||||
allow_hyphen_values = true,
|
||||
trailing_var_arg = true,
|
||||
help = "Command to run inside the guest"
|
||||
)]
|
||||
command: Vec<String>,
|
||||
}
|
||||
|
||||
impl LauchCommand {
|
||||
@ -47,11 +74,25 @@ impl LauchCommand {
|
||||
mut client: ControlServiceClient<Channel>,
|
||||
events: EventStream,
|
||||
) -> Result<()> {
|
||||
let response = client
|
||||
.pull_image(PullImageRequest {
|
||||
image: self.oci.clone(),
|
||||
format: match self.image_format {
|
||||
PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
|
||||
PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
let reply = pull_interactive_progress(response.into_inner()).await?;
|
||||
|
||||
let request = CreateGuestRequest {
|
||||
spec: Some(GuestSpec {
|
||||
name: self.name.unwrap_or_default(),
|
||||
image: Some(GuestImageSpec {
|
||||
image: Some(Image::Oci(GuestOciImageSpec { image: self.oci })),
|
||||
image: Some(Image::Oci(GuestOciImageSpec {
|
||||
digest: reply.digest,
|
||||
format: reply.format,
|
||||
})),
|
||||
}),
|
||||
vcpus: self.cpus,
|
||||
mem: self.mem,
|
||||
@ -63,7 +104,7 @@ impl LauchCommand {
|
||||
value: value.clone(),
|
||||
})
|
||||
.collect(),
|
||||
command: self.run,
|
||||
command: self.command,
|
||||
}),
|
||||
annotations: vec![],
|
||||
}),
|
||||
|
@ -1,10 +1,10 @@
|
||||
use anyhow::{anyhow, Result};
|
||||
use clap::{Parser, ValueEnum};
|
||||
use cli_tables::Table;
|
||||
use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
|
||||
use krata::{
|
||||
events::EventStream,
|
||||
v1::{
|
||||
common::{guest_image_spec::Image, Guest, GuestStatus},
|
||||
common::{Guest, GuestStatus},
|
||||
control::{
|
||||
control_service_client::ControlServiceClient, ListGuestsRequest, ResolveGuestRequest,
|
||||
},
|
||||
@ -14,11 +14,11 @@ use krata::{
|
||||
use serde_json::Value;
|
||||
use tonic::{transport::Channel, Request};
|
||||
|
||||
use crate::format::{guest_state_text, guest_status_text, kv2line, proto2dynamic, proto2kv};
|
||||
use crate::format::{guest_simple_line, guest_status_text, kv2line, proto2dynamic, proto2kv};
|
||||
|
||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||
enum ListFormat {
|
||||
CliTable,
|
||||
Table,
|
||||
Json,
|
||||
JsonPretty,
|
||||
Jsonl,
|
||||
@ -28,10 +28,11 @@ enum ListFormat {
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "List the guests on the hypervisor")]
|
||||
pub struct ListCommand {
|
||||
#[arg(short, long, default_value = "cli-table")]
|
||||
#[arg(short, long, default_value = "table", help = "Output format")]
|
||||
format: ListFormat,
|
||||
#[arg()]
|
||||
#[arg(help = "Limit to a single guest, either the name or the uuid")]
|
||||
guest: Option<String>,
|
||||
}
|
||||
|
||||
@ -70,24 +71,13 @@ impl ListCommand {
|
||||
});
|
||||
|
||||
match self.format {
|
||||
ListFormat::CliTable => {
|
||||
ListFormat::Table => {
|
||||
self.print_guest_table(guests)?;
|
||||
}
|
||||
|
||||
ListFormat::Simple => {
|
||||
for guest in guests {
|
||||
let state = guest_status_text(
|
||||
guest
|
||||
.state
|
||||
.as_ref()
|
||||
.map(|x| x.status())
|
||||
.unwrap_or(GuestStatus::Unknown),
|
||||
);
|
||||
let name = guest.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
|
||||
let network = guest.state.as_ref().and_then(|x| x.network.as_ref());
|
||||
let ipv4 = network.map(|x| x.guest_ipv4.as_str()).unwrap_or("");
|
||||
let ipv6 = network.map(|x| x.guest_ipv6.as_str()).unwrap_or("");
|
||||
println!("{}\t{}\t{}\t{}\t{}", guest.id, state, name, ipv4, ipv6);
|
||||
println!("{}", guest_simple_line(&guest));
|
||||
}
|
||||
}
|
||||
|
||||
@ -125,49 +115,51 @@ impl ListCommand {
|
||||
|
||||
fn print_guest_table(&self, guests: Vec<Guest>) -> Result<()> {
|
||||
let mut table = Table::new();
|
||||
let header = vec!["name", "uuid", "state", "ipv4", "ipv6", "image"];
|
||||
table.push_row(&header)?;
|
||||
table.load_preset(UTF8_FULL_CONDENSED);
|
||||
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
||||
table.set_header(vec!["name", "uuid", "status", "ipv4", "ipv6"]);
|
||||
for guest in guests {
|
||||
let ipv4 = guest
|
||||
.state
|
||||
.as_ref()
|
||||
.and_then(|x| x.network.as_ref())
|
||||
.map(|x| x.guest_ipv4.as_str())
|
||||
.unwrap_or("unknown");
|
||||
.unwrap_or("n/a");
|
||||
let ipv6 = guest
|
||||
.state
|
||||
.as_ref()
|
||||
.and_then(|x| x.network.as_ref())
|
||||
.map(|x| x.guest_ipv6.as_str())
|
||||
.unwrap_or("unknown");
|
||||
.unwrap_or("n/a");
|
||||
let Some(spec) = guest.spec else {
|
||||
continue;
|
||||
};
|
||||
let image = spec
|
||||
.image
|
||||
.map(|x| {
|
||||
x.image
|
||||
.map(|y| match y {
|
||||
Image::Oci(oci) => oci.image,
|
||||
})
|
||||
.unwrap_or("unknown".to_string())
|
||||
})
|
||||
.unwrap_or("unknown".to_string());
|
||||
table.push_row_string(&vec![
|
||||
spec.name,
|
||||
guest.id,
|
||||
format!("{}", guest_state_text(guest.state.as_ref())),
|
||||
ipv4.to_string(),
|
||||
ipv6.to_string(),
|
||||
image,
|
||||
])?;
|
||||
let status = guest.state.as_ref().cloned().unwrap_or_default().status();
|
||||
let status_text = guest_status_text(status);
|
||||
|
||||
let status_color = match status {
|
||||
GuestStatus::Destroyed | GuestStatus::Failed => Color::Red,
|
||||
GuestStatus::Destroying | GuestStatus::Exited | GuestStatus::Starting => {
|
||||
Color::Yellow
|
||||
}
|
||||
GuestStatus::Started => Color::Green,
|
||||
_ => Color::Reset,
|
||||
};
|
||||
|
||||
table.add_row(vec![
|
||||
Cell::new(spec.name),
|
||||
Cell::new(guest.id),
|
||||
Cell::new(status_text).fg(status_color),
|
||||
Cell::new(ipv4.to_string()),
|
||||
Cell::new(ipv6.to_string()),
|
||||
]);
|
||||
}
|
||||
if table.num_records() == 1 {
|
||||
if table.is_empty() {
|
||||
if self.guest.is_none() {
|
||||
println!("no guests have been launched");
|
||||
}
|
||||
} else {
|
||||
println!("{}", table.to_string());
|
||||
println!("{}", table);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
58
crates/ctl/src/cli/logs.rs
Normal file
58
crates/ctl/src/cli/logs.rs
Normal file
@ -0,0 +1,58 @@
|
||||
use anyhow::Result;
|
||||
use async_stream::stream;
|
||||
use clap::Parser;
|
||||
use krata::{
|
||||
events::EventStream,
|
||||
v1::control::{control_service_client::ControlServiceClient, ConsoleDataRequest},
|
||||
};
|
||||
|
||||
use tokio::select;
|
||||
use tokio_stream::{pending, StreamExt};
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::console::StdioConsoleStream;
|
||||
|
||||
use super::resolve_guest;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "View the logs of a guest")]
|
||||
pub struct LogsCommand {
|
||||
#[arg(short, long, help = "Follow output from the guest")]
|
||||
follow: bool,
|
||||
#[arg(help = "Guest to show logs for, either the name or the uuid")]
|
||||
guest: String,
|
||||
}
|
||||
|
||||
impl LogsCommand {
|
||||
pub async fn run(
|
||||
self,
|
||||
mut client: ControlServiceClient<Channel>,
|
||||
events: EventStream,
|
||||
) -> Result<()> {
|
||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
||||
let guest_id_stream = guest_id.clone();
|
||||
let follow = self.follow;
|
||||
let input = stream! {
|
||||
yield ConsoleDataRequest { guest_id: guest_id_stream, data: Vec::new() };
|
||||
if follow {
|
||||
let mut pending = pending::<ConsoleDataRequest>();
|
||||
while let Some(x) = pending.next().await {
|
||||
yield x;
|
||||
}
|
||||
}
|
||||
};
|
||||
let output = client.console_data(input).await?.into_inner();
|
||||
let stdout_handle =
|
||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
||||
let exit_hook_task = StdioConsoleStream::guest_exit_hook(guest_id.clone(), events).await?;
|
||||
let code = select! {
|
||||
x = stdout_handle => {
|
||||
x??;
|
||||
None
|
||||
},
|
||||
x = exit_hook_task => x?
|
||||
};
|
||||
StdioConsoleStream::restore_terminal_mode();
|
||||
std::process::exit(code.unwrap_or(0));
|
||||
}
|
||||
}
|
83
crates/ctl/src/cli/metrics.rs
Normal file
83
crates/ctl/src/cli/metrics.rs
Normal file
@ -0,0 +1,83 @@
|
||||
use anyhow::Result;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use krata::{
|
||||
events::EventStream,
|
||||
v1::{
|
||||
common::GuestMetricNode,
|
||||
control::{control_service_client::ControlServiceClient, ReadGuestMetricsRequest},
|
||||
},
|
||||
};
|
||||
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::format::{kv2line, metrics_flat, metrics_tree, proto2dynamic};
|
||||
|
||||
use super::resolve_guest;
|
||||
|
||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||
enum MetricsFormat {
|
||||
Tree,
|
||||
Json,
|
||||
JsonPretty,
|
||||
Yaml,
|
||||
KeyValue,
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Read metrics from the guest")]
|
||||
pub struct MetricsCommand {
|
||||
#[arg(short, long, default_value = "tree", help = "Output format")]
|
||||
format: MetricsFormat,
|
||||
#[arg(help = "Guest to read metrics for, either the name or the uuid")]
|
||||
guest: String,
|
||||
}
|
||||
|
||||
impl MetricsCommand {
|
||||
pub async fn run(
|
||||
self,
|
||||
mut client: ControlServiceClient<Channel>,
|
||||
_events: EventStream,
|
||||
) -> Result<()> {
|
||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
||||
let root = client
|
||||
.read_guest_metrics(ReadGuestMetricsRequest { guest_id })
|
||||
.await?
|
||||
.into_inner()
|
||||
.root
|
||||
.unwrap_or_default();
|
||||
match self.format {
|
||||
MetricsFormat::Tree => {
|
||||
self.print_metrics_tree(root)?;
|
||||
}
|
||||
|
||||
MetricsFormat::Json | MetricsFormat::JsonPretty | MetricsFormat::Yaml => {
|
||||
let value = serde_json::to_value(proto2dynamic(root)?)?;
|
||||
let encoded = if self.format == MetricsFormat::JsonPretty {
|
||||
serde_json::to_string_pretty(&value)?
|
||||
} else if self.format == MetricsFormat::Yaml {
|
||||
serde_yaml::to_string(&value)?
|
||||
} else {
|
||||
serde_json::to_string(&value)?
|
||||
};
|
||||
println!("{}", encoded.trim());
|
||||
}
|
||||
|
||||
MetricsFormat::KeyValue => {
|
||||
self.print_key_value(root)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn print_metrics_tree(&self, root: GuestMetricNode) -> Result<()> {
|
||||
print!("{}", metrics_tree(root));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn print_key_value(&self, metrics: GuestMetricNode) -> Result<()> {
|
||||
let kvs = metrics_flat(metrics);
|
||||
println!("{}", kv2line(kvs));
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -1,8 +1,13 @@
|
||||
pub mod attach;
|
||||
pub mod destroy;
|
||||
pub mod idm_snoop;
|
||||
pub mod launch;
|
||||
pub mod list;
|
||||
pub mod logs;
|
||||
pub mod metrics;
|
||||
pub mod pull;
|
||||
pub mod resolve;
|
||||
pub mod top;
|
||||
pub mod watch;
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
@ -10,21 +15,28 @@ use clap::{Parser, Subcommand};
|
||||
use krata::{
|
||||
client::ControlClientProvider,
|
||||
events::EventStream,
|
||||
v1::control::{
|
||||
control_service_client::ControlServiceClient, ResolveGuestRequest, WatchEventsRequest,
|
||||
},
|
||||
v1::control::{control_service_client::ControlServiceClient, ResolveGuestRequest},
|
||||
};
|
||||
use tonic::{transport::Channel, Request};
|
||||
|
||||
use self::{
|
||||
attach::AttachCommand, destroy::DestroyCommand, launch::LauchCommand, list::ListCommand,
|
||||
resolve::ResolveCommand, watch::WatchCommand,
|
||||
attach::AttachCommand, destroy::DestroyCommand, idm_snoop::IdmSnoopCommand,
|
||||
launch::LauchCommand, list::ListCommand, logs::LogsCommand, metrics::MetricsCommand,
|
||||
pull::PullCommand, resolve::ResolveCommand, top::TopCommand, watch::WatchCommand,
|
||||
};
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(version, about)]
|
||||
#[command(
|
||||
version,
|
||||
about = "Control the krata hypervisor, a secure platform for running containers"
|
||||
)]
|
||||
pub struct ControlCommand {
|
||||
#[arg(short, long, default_value = "unix:///var/lib/krata/daemon.socket")]
|
||||
#[arg(
|
||||
short,
|
||||
long,
|
||||
help = "The connection URL to the krata hypervisor",
|
||||
default_value = "unix:///var/lib/krata/daemon.socket"
|
||||
)]
|
||||
connection: String,
|
||||
|
||||
#[command(subcommand)]
|
||||
@ -37,20 +49,19 @@ pub enum Commands {
|
||||
Destroy(DestroyCommand),
|
||||
List(ListCommand),
|
||||
Attach(AttachCommand),
|
||||
Pull(PullCommand),
|
||||
Logs(LogsCommand),
|
||||
Watch(WatchCommand),
|
||||
Resolve(ResolveCommand),
|
||||
Metrics(MetricsCommand),
|
||||
IdmSnoop(IdmSnoopCommand),
|
||||
Top(TopCommand),
|
||||
}
|
||||
|
||||
impl ControlCommand {
|
||||
pub async fn run(self) -> Result<()> {
|
||||
let mut client = ControlClientProvider::dial(self.connection.parse()?).await?;
|
||||
let events = EventStream::open(
|
||||
client
|
||||
.watch_events(WatchEventsRequest {})
|
||||
.await?
|
||||
.into_inner(),
|
||||
)
|
||||
.await?;
|
||||
let client = ControlClientProvider::dial(self.connection.parse()?).await?;
|
||||
let events = EventStream::open(client.clone()).await?;
|
||||
|
||||
match self.command {
|
||||
Commands::Launch(launch) => {
|
||||
@ -65,6 +76,10 @@ impl ControlCommand {
|
||||
attach.run(client, events).await?;
|
||||
}
|
||||
|
||||
Commands::Logs(logs) => {
|
||||
logs.run(client, events).await?;
|
||||
}
|
||||
|
||||
Commands::List(list) => {
|
||||
list.run(client, events).await?;
|
||||
}
|
||||
@ -76,6 +91,22 @@ impl ControlCommand {
|
||||
Commands::Resolve(resolve) => {
|
||||
resolve.run(client).await?;
|
||||
}
|
||||
|
||||
Commands::Metrics(metrics) => {
|
||||
metrics.run(client, events).await?;
|
||||
}
|
||||
|
||||
Commands::IdmSnoop(snoop) => {
|
||||
snoop.run(client, events).await?;
|
||||
}
|
||||
|
||||
Commands::Top(top) => {
|
||||
top.run(client, events).await?;
|
||||
}
|
||||
|
||||
Commands::Pull(pull) => {
|
||||
pull.run(client).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
42
crates/ctl/src/cli/pull.rs
Normal file
42
crates/ctl/src/cli/pull.rs
Normal file
@ -0,0 +1,42 @@
|
||||
use anyhow::Result;
|
||||
use clap::{Parser, ValueEnum};
|
||||
use krata::v1::{
|
||||
common::GuestOciImageFormat,
|
||||
control::{control_service_client::ControlServiceClient, PullImageRequest},
|
||||
};
|
||||
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::pull::pull_interactive_progress;
|
||||
|
||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||
pub enum PullImageFormat {
|
||||
Squashfs,
|
||||
Erofs,
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Pull an image into the cache")]
|
||||
pub struct PullCommand {
|
||||
#[arg(help = "Image name")]
|
||||
image: String,
|
||||
#[arg(short = 's', long, default_value = "squashfs", help = "Image format")]
|
||||
image_format: PullImageFormat,
|
||||
}
|
||||
|
||||
impl PullCommand {
|
||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||
let response = client
|
||||
.pull_image(PullImageRequest {
|
||||
image: self.image.clone(),
|
||||
format: match self.image_format {
|
||||
PullImageFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
|
||||
PullImageFormat::Erofs => GuestOciImageFormat::Erofs.into(),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
let reply = pull_interactive_progress(response.into_inner()).await?;
|
||||
println!("{}", reply.digest);
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -5,8 +5,9 @@ use krata::v1::control::{control_service_client::ControlServiceClient, ResolveGu
|
||||
use tonic::{transport::Channel, Request};
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Resolve a guest name to a uuid")]
|
||||
pub struct ResolveCommand {
|
||||
#[arg()]
|
||||
#[arg(help = "Guest name")]
|
||||
guest: String,
|
||||
}
|
||||
|
||||
|
215
crates/ctl/src/cli/top.rs
Normal file
215
crates/ctl/src/cli/top.rs
Normal file
@ -0,0 +1,215 @@
|
||||
use anyhow::Result;
|
||||
use clap::Parser;
|
||||
use krata::{events::EventStream, v1::control::control_service_client::ControlServiceClient};
|
||||
use std::{
|
||||
io::{self, stdout, Stdout},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::select;
|
||||
use tokio_stream::StreamExt;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crossterm::{
|
||||
event::{Event, KeyCode, KeyEvent, KeyEventKind},
|
||||
execute,
|
||||
terminal::*,
|
||||
};
|
||||
use ratatui::{
|
||||
prelude::*,
|
||||
symbols::border,
|
||||
widgets::{
|
||||
block::{Position, Title},
|
||||
Block, Borders, Row, Table, TableState,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
format::guest_status_text,
|
||||
metrics::{
|
||||
lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Dashboard for running guests")]
|
||||
pub struct TopCommand {}
|
||||
|
||||
pub type Tui = Terminal<CrosstermBackend<Stdout>>;
|
||||
|
||||
impl TopCommand {
|
||||
pub async fn run(
|
||||
self,
|
||||
client: ControlServiceClient<Channel>,
|
||||
events: EventStream,
|
||||
) -> Result<()> {
|
||||
let collector = MultiMetricCollector::new(client, events, Duration::from_millis(200))?;
|
||||
let collector = collector.launch().await?;
|
||||
let mut tui = TopCommand::init()?;
|
||||
let mut app = TopApp {
|
||||
metrics: MultiMetricState { guests: vec![] },
|
||||
exit: false,
|
||||
table: TableState::new(),
|
||||
};
|
||||
app.run(collector, &mut tui).await?;
|
||||
TopCommand::restore()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn init() -> io::Result<Tui> {
|
||||
execute!(stdout(), EnterAlternateScreen)?;
|
||||
enable_raw_mode()?;
|
||||
Terminal::new(CrosstermBackend::new(stdout()))
|
||||
}
|
||||
|
||||
pub fn restore() -> io::Result<()> {
|
||||
execute!(stdout(), LeaveAlternateScreen)?;
|
||||
disable_raw_mode()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TopApp {
|
||||
table: TableState,
|
||||
metrics: MultiMetricState,
|
||||
exit: bool,
|
||||
}
|
||||
|
||||
impl TopApp {
|
||||
pub async fn run(
|
||||
&mut self,
|
||||
mut collector: MultiMetricCollectorHandle,
|
||||
terminal: &mut Tui,
|
||||
) -> Result<()> {
|
||||
let mut events = crossterm::event::EventStream::new();
|
||||
|
||||
while !self.exit {
|
||||
terminal.draw(|frame| self.render_frame(frame))?;
|
||||
|
||||
select! {
|
||||
x = collector.receiver.recv() => match x {
|
||||
Some(state) => {
|
||||
self.metrics = state;
|
||||
},
|
||||
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
},
|
||||
|
||||
x = events.next() => match x {
|
||||
Some(event) => {
|
||||
let event = event?;
|
||||
self.handle_event(event)?;
|
||||
},
|
||||
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn render_frame(&mut self, frame: &mut Frame) {
|
||||
frame.render_widget(self, frame.size());
|
||||
}
|
||||
|
||||
fn handle_event(&mut self, event: Event) -> io::Result<()> {
|
||||
match event {
|
||||
Event::Key(key_event) if key_event.kind == KeyEventKind::Press => {
|
||||
self.handle_key_event(key_event)
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn exit(&mut self) {
|
||||
self.exit = true;
|
||||
}
|
||||
|
||||
fn handle_key_event(&mut self, key_event: KeyEvent) {
|
||||
if let KeyCode::Char('q') = key_event.code {
|
||||
self.exit()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Widget for &mut TopApp {
|
||||
fn render(self, area: Rect, buf: &mut Buffer) {
|
||||
let title = Title::from(" krata hypervisor ".bold());
|
||||
let instructions = Title::from(vec![" Quit ".into(), "<Q> ".blue().bold()]);
|
||||
let block = Block::default()
|
||||
.title(title.alignment(Alignment::Center))
|
||||
.title(
|
||||
instructions
|
||||
.alignment(Alignment::Center)
|
||||
.position(Position::Bottom),
|
||||
)
|
||||
.borders(Borders::ALL)
|
||||
.border_set(border::THICK);
|
||||
|
||||
let mut rows = vec![];
|
||||
|
||||
for ms in &self.metrics.guests {
|
||||
let Some(ref spec) = ms.guest.spec else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let Some(ref state) = ms.guest.state else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let memory_total = ms
|
||||
.root
|
||||
.as_ref()
|
||||
.and_then(|root| lookup_metric_value(root, "system/memory/total"));
|
||||
let memory_used = ms
|
||||
.root
|
||||
.as_ref()
|
||||
.and_then(|root| lookup_metric_value(root, "system/memory/used"));
|
||||
let memory_free = ms
|
||||
.root
|
||||
.as_ref()
|
||||
.and_then(|root| lookup_metric_value(root, "system/memory/free"));
|
||||
|
||||
let row = Row::new(vec![
|
||||
spec.name.clone(),
|
||||
ms.guest.id.clone(),
|
||||
guest_status_text(state.status()),
|
||||
memory_total.unwrap_or_default(),
|
||||
memory_used.unwrap_or_default(),
|
||||
memory_free.unwrap_or_default(),
|
||||
]);
|
||||
rows.push(row);
|
||||
}
|
||||
|
||||
let widths = [
|
||||
Constraint::Min(8),
|
||||
Constraint::Min(8),
|
||||
Constraint::Min(8),
|
||||
Constraint::Min(8),
|
||||
Constraint::Min(8),
|
||||
Constraint::Min(8),
|
||||
];
|
||||
|
||||
let table = Table::new(rows, widths)
|
||||
.header(
|
||||
Row::new(vec![
|
||||
"name",
|
||||
"id",
|
||||
"status",
|
||||
"total memory",
|
||||
"used memory",
|
||||
"free memory",
|
||||
])
|
||||
.style(Style::new().bold())
|
||||
.bottom_margin(1),
|
||||
)
|
||||
.column_spacing(1)
|
||||
.block(block);
|
||||
|
||||
StatefulWidget::render(table, area, buf, &mut self.table);
|
||||
}
|
||||
}
|
@ -7,7 +7,7 @@ use krata::{
|
||||
use prost_reflect::ReflectMessage;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::format::{guest_state_text, kv2line, proto2dynamic, proto2kv};
|
||||
use crate::format::{guest_simple_line, kv2line, proto2dynamic, proto2kv};
|
||||
|
||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||
enum WatchFormat {
|
||||
@ -17,8 +17,9 @@ enum WatchFormat {
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(about = "Watch for guest changes")]
|
||||
pub struct WatchCommand {
|
||||
#[arg(short, long, default_value = "simple")]
|
||||
#[arg(short, long, default_value = "simple", help = "Output format")]
|
||||
format: WatchFormat,
|
||||
}
|
||||
|
||||
@ -27,12 +28,10 @@ impl WatchCommand {
|
||||
let mut stream = events.subscribe();
|
||||
loop {
|
||||
let event = stream.recv().await?;
|
||||
match event {
|
||||
Event::GuestChanged(changed) => {
|
||||
let guest = changed.guest.clone();
|
||||
self.print_event("guest.changed", changed, guest)?;
|
||||
}
|
||||
}
|
||||
|
||||
let Event::GuestChanged(changed) = event;
|
||||
let guest = changed.guest.clone();
|
||||
self.print_event("guest.changed", changed, guest)?;
|
||||
}
|
||||
}
|
||||
|
||||
@ -45,12 +44,7 @@ impl WatchCommand {
|
||||
match self.format {
|
||||
WatchFormat::Simple => {
|
||||
if let Some(guest) = guest {
|
||||
println!(
|
||||
"{} guest={} status=\"{}\"",
|
||||
typ,
|
||||
guest.id,
|
||||
guest_state_text(guest.state.as_ref()).replace('"', "\\\"")
|
||||
);
|
||||
println!("{}", guest_simple_line(&guest));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -69,29 +69,26 @@ impl StdioConsoleStream {
|
||||
Ok(tokio::task::spawn(async move {
|
||||
let mut stream = events.subscribe();
|
||||
while let Ok(event) = stream.recv().await {
|
||||
match event {
|
||||
Event::GuestChanged(changed) => {
|
||||
let Some(guest) = changed.guest else {
|
||||
continue;
|
||||
};
|
||||
let Event::GuestChanged(changed) = event;
|
||||
let Some(guest) = changed.guest else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let Some(state) = guest.state else {
|
||||
continue;
|
||||
};
|
||||
let Some(state) = guest.state else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if guest.id != id {
|
||||
continue;
|
||||
}
|
||||
if guest.id != id {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(exit_info) = state.exit_info {
|
||||
return Some(exit_info.code);
|
||||
}
|
||||
if let Some(exit_info) = state.exit_info {
|
||||
return Some(exit_info.code);
|
||||
}
|
||||
|
||||
let status = state.status();
|
||||
if status == GuestStatus::Destroying || status == GuestStatus::Destroyed {
|
||||
return Some(10);
|
||||
}
|
||||
}
|
||||
let status = state.status();
|
||||
if status == GuestStatus::Destroying || status == GuestStatus::Destroyed {
|
||||
return Some(10);
|
||||
}
|
||||
}
|
||||
None
|
||||
|
@ -1,8 +1,12 @@
|
||||
use std::collections::HashMap;
|
||||
use std::{collections::HashMap, time::Duration};
|
||||
|
||||
use anyhow::Result;
|
||||
use krata::v1::common::{GuestState, GuestStatus};
|
||||
use prost_reflect::{DynamicMessage, ReflectMessage, Value};
|
||||
use fancy_duration::FancyDuration;
|
||||
use human_bytes::human_bytes;
|
||||
use krata::v1::common::{Guest, GuestMetricFormat, GuestMetricNode, GuestStatus};
|
||||
use prost_reflect::{DynamicMessage, FieldDescriptor, ReflectMessage, Value as ReflectValue};
|
||||
use prost_types::Value;
|
||||
use termtree::Tree;
|
||||
|
||||
pub fn proto2dynamic(proto: impl ReflectMessage) -> Result<DynamicMessage> {
|
||||
Ok(DynamicMessage::decode(
|
||||
@ -15,38 +19,56 @@ pub fn proto2kv(proto: impl ReflectMessage) -> Result<HashMap<String, String>> {
|
||||
let message = proto2dynamic(proto)?;
|
||||
let mut map = HashMap::new();
|
||||
|
||||
fn crawl(prefix: &str, map: &mut HashMap<String, String>, message: &DynamicMessage) {
|
||||
for (field, value) in message.fields() {
|
||||
let path = if prefix.is_empty() {
|
||||
field.name().to_string()
|
||||
} else {
|
||||
format!("{}.{}", prefix, field.name())
|
||||
};
|
||||
match value {
|
||||
Value::Message(child) => {
|
||||
crawl(&path, map, child);
|
||||
fn crawl(
|
||||
prefix: String,
|
||||
field: Option<&FieldDescriptor>,
|
||||
map: &mut HashMap<String, String>,
|
||||
value: &ReflectValue,
|
||||
) {
|
||||
match value {
|
||||
ReflectValue::Message(child) => {
|
||||
for (field, field_value) in child.fields() {
|
||||
let path = if prefix.is_empty() {
|
||||
field.json_name().to_string()
|
||||
} else {
|
||||
format!("{}.{}", prefix, field.json_name())
|
||||
};
|
||||
crawl(path, Some(&field), map, field_value);
|
||||
}
|
||||
}
|
||||
|
||||
Value::EnumNumber(number) => {
|
||||
if let Some(e) = field.kind().as_enum() {
|
||||
ReflectValue::EnumNumber(number) => {
|
||||
if let Some(kind) = field.map(|x| x.kind()) {
|
||||
if let Some(e) = kind.as_enum() {
|
||||
if let Some(value) = e.get_value(*number) {
|
||||
map.insert(path, value.name().to_string());
|
||||
map.insert(prefix, value.name().to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Value::String(value) => {
|
||||
map.insert(path, value.clone());
|
||||
}
|
||||
ReflectValue::String(value) => {
|
||||
map.insert(prefix.to_string(), value.clone());
|
||||
}
|
||||
|
||||
_ => {
|
||||
map.insert(path, value.to_string());
|
||||
ReflectValue::List(value) => {
|
||||
for (x, value) in value.iter().enumerate() {
|
||||
crawl(format!("{}.{}", prefix, x), field, map, value);
|
||||
}
|
||||
}
|
||||
|
||||
_ => {
|
||||
map.insert(prefix.to_string(), value.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
crawl("", &mut map, &message);
|
||||
crawl(
|
||||
"".to_string(),
|
||||
None,
|
||||
&mut map,
|
||||
&ReflectValue::Message(message),
|
||||
);
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
@ -71,16 +93,77 @@ pub fn guest_status_text(status: GuestStatus) -> String {
|
||||
.to_string()
|
||||
}
|
||||
|
||||
pub fn guest_state_text(state: Option<&GuestState>) -> String {
|
||||
let state = state.cloned().unwrap_or_default();
|
||||
let mut text = guest_status_text(state.status());
|
||||
|
||||
if let Some(exit) = state.exit_info {
|
||||
text.push_str(&format!(" (exit code: {})", exit.code));
|
||||
}
|
||||
|
||||
if let Some(error) = state.error_info {
|
||||
text.push_str(&format!(" (error: {})", error.message));
|
||||
}
|
||||
text
|
||||
pub fn guest_simple_line(guest: &Guest) -> String {
|
||||
let state = guest_status_text(
|
||||
guest
|
||||
.state
|
||||
.as_ref()
|
||||
.map(|x| x.status())
|
||||
.unwrap_or(GuestStatus::Unknown),
|
||||
);
|
||||
let name = guest.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
|
||||
let network = guest.state.as_ref().and_then(|x| x.network.as_ref());
|
||||
let ipv4 = network.map(|x| x.guest_ipv4.as_str()).unwrap_or("");
|
||||
let ipv6 = network.map(|x| x.guest_ipv6.as_str()).unwrap_or("");
|
||||
format!("{}\t{}\t{}\t{}\t{}", guest.id, state, name, ipv4, ipv6)
|
||||
}
|
||||
|
||||
fn metrics_value_string(value: Value) -> String {
|
||||
proto2dynamic(value)
|
||||
.map(|x| serde_json::to_string(&x).ok())
|
||||
.ok()
|
||||
.flatten()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn metrics_value_numeric(value: Value) -> f64 {
|
||||
let string = metrics_value_string(value);
|
||||
string.parse::<f64>().ok().unwrap_or(f64::NAN)
|
||||
}
|
||||
|
||||
pub fn metrics_value_pretty(value: Value, format: GuestMetricFormat) -> String {
|
||||
match format {
|
||||
GuestMetricFormat::Bytes => human_bytes(metrics_value_numeric(value)),
|
||||
GuestMetricFormat::Integer => (metrics_value_numeric(value) as u64).to_string(),
|
||||
GuestMetricFormat::DurationSeconds => {
|
||||
FancyDuration(Duration::from_secs_f64(metrics_value_numeric(value))).to_string()
|
||||
}
|
||||
_ => metrics_value_string(value),
|
||||
}
|
||||
}
|
||||
|
||||
fn metrics_flat_internal(prefix: &str, node: GuestMetricNode, map: &mut HashMap<String, String>) {
|
||||
if let Some(value) = node.value {
|
||||
map.insert(prefix.to_string(), metrics_value_string(value));
|
||||
}
|
||||
|
||||
for child in node.children {
|
||||
let path = if prefix.is_empty() {
|
||||
child.name.to_string()
|
||||
} else {
|
||||
format!("{}.{}", prefix, child.name)
|
||||
};
|
||||
metrics_flat_internal(&path, child, map);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn metrics_flat(root: GuestMetricNode) -> HashMap<String, String> {
|
||||
let mut map = HashMap::new();
|
||||
metrics_flat_internal("", root, &mut map);
|
||||
map
|
||||
}
|
||||
|
||||
pub fn metrics_tree(node: GuestMetricNode) -> Tree<String> {
|
||||
let mut name = node.name.to_string();
|
||||
let format = node.format();
|
||||
if let Some(value) = node.value {
|
||||
let value_string = metrics_value_pretty(value, format);
|
||||
name.push_str(&format!(": {}", value_string));
|
||||
}
|
||||
|
||||
let mut tree = Tree::new(name);
|
||||
for child in node.children {
|
||||
tree.push(metrics_tree(child));
|
||||
}
|
||||
tree
|
||||
}
|
||||
|
@ -1,3 +1,5 @@
pub mod cli;
pub mod console;
pub mod format;
pub mod metrics;
pub mod pull;
158
crates/ctl/src/metrics.rs
Normal file
@ -0,0 +1,158 @@
|
||||
use anyhow::Result;
|
||||
use krata::{
|
||||
events::EventStream,
|
||||
v1::{
|
||||
common::{Guest, GuestMetricNode, GuestStatus},
|
||||
control::{
|
||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||
ListGuestsRequest, ReadGuestMetricsRequest,
|
||||
},
|
||||
},
|
||||
};
|
||||
use log::error;
|
||||
use std::time::Duration;
|
||||
use tokio::{
|
||||
select,
|
||||
sync::mpsc::{channel, Receiver, Sender},
|
||||
task::JoinHandle,
|
||||
time::{sleep, timeout},
|
||||
};
|
||||
use tonic::transport::Channel;
|
||||
|
||||
use crate::format::metrics_value_pretty;
|
||||
|
||||
pub struct MetricState {
|
||||
pub guest: Guest,
|
||||
pub root: Option<GuestMetricNode>,
|
||||
}
|
||||
|
||||
pub struct MultiMetricState {
|
||||
pub guests: Vec<MetricState>,
|
||||
}
|
||||
|
||||
pub struct MultiMetricCollector {
|
||||
client: ControlServiceClient<Channel>,
|
||||
events: EventStream,
|
||||
period: Duration,
|
||||
}
|
||||
|
||||
pub struct MultiMetricCollectorHandle {
|
||||
pub receiver: Receiver<MultiMetricState>,
|
||||
task: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl Drop for MultiMetricCollectorHandle {
|
||||
fn drop(&mut self) {
|
||||
self.task.abort();
|
||||
}
|
||||
}
|
||||
|
||||
impl MultiMetricCollector {
|
||||
pub fn new(
|
||||
client: ControlServiceClient<Channel>,
|
||||
events: EventStream,
|
||||
period: Duration,
|
||||
) -> Result<MultiMetricCollector> {
|
||||
Ok(MultiMetricCollector {
|
||||
client,
|
||||
events,
|
||||
period,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn launch(mut self) -> Result<MultiMetricCollectorHandle> {
|
||||
let (sender, receiver) = channel::<MultiMetricState>(100);
|
||||
let task = tokio::task::spawn(async move {
|
||||
if let Err(error) = self.process(sender).await {
|
||||
error!("failed to process multi metric collector: {}", error);
|
||||
}
|
||||
});
|
||||
Ok(MultiMetricCollectorHandle { receiver, task })
|
||||
}
|
||||
|
||||
pub async fn process(&mut self, sender: Sender<MultiMetricState>) -> Result<()> {
|
||||
let mut events = self.events.subscribe();
|
||||
let mut guests: Vec<Guest> = self
|
||||
.client
|
||||
.list_guests(ListGuestsRequest {})
|
||||
.await?
|
||||
.into_inner()
|
||||
.guests;
|
||||
loop {
|
||||
let collect = select! {
|
||||
x = events.recv() => match x {
|
||||
Ok(event) => {
|
||||
let Event::GuestChanged(changed) = event;
|
||||
let Some(guest) = changed.guest else {
|
||||
continue;
|
||||
};
|
||||
let Some(ref state) = guest.state else {
|
||||
continue;
|
||||
};
|
||||
guests.retain(|x| x.id != guest.id);
|
||||
if state.status() != GuestStatus::Destroying {
|
||||
guests.push(guest);
|
||||
}
|
||||
false
|
||||
},
|
||||
|
||||
Err(error) => {
|
||||
return Err(error.into());
|
||||
}
|
||||
},
|
||||
|
||||
_ = sleep(self.period) => {
|
||||
true
|
||||
}
|
||||
};
|
||||
|
||||
if !collect {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut metrics = Vec::new();
|
||||
for guest in &guests {
|
||||
let Some(ref state) = guest.state else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if state.status() != GuestStatus::Started {
|
||||
continue;
|
||||
}
|
||||
|
||||
let root = timeout(
|
||||
Duration::from_secs(5),
|
||||
self.client.read_guest_metrics(ReadGuestMetricsRequest {
|
||||
guest_id: guest.id.clone(),
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|x| x.ok())
|
||||
.map(|x| x.into_inner())
|
||||
.and_then(|x| x.root);
|
||||
metrics.push(MetricState {
|
||||
guest: guest.clone(),
|
||||
root,
|
||||
});
|
||||
}
|
||||
sender.send(MultiMetricState { guests: metrics }).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn lookup<'a>(node: &'a GuestMetricNode, path: &str) -> Option<&'a GuestMetricNode> {
|
||||
let Some((what, b)) = path.split_once('/') else {
|
||||
return node.children.iter().find(|x| x.name == path);
|
||||
};
|
||||
let next = node.children.iter().find(|x| x.name == what)?;
|
||||
lookup(next, b)
|
||||
}
|
||||
|
||||
pub fn lookup_metric_value(node: &GuestMetricNode, path: &str) -> Option<String> {
|
||||
lookup(node, path).and_then(|x| {
|
||||
x.value
|
||||
.as_ref()
|
||||
.map(|v| metrics_value_pretty(v.clone(), x.format()))
|
||||
})
|
||||
}
|
118
crates/ctl/src/pull.rs
Normal file
@ -0,0 +1,118 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use krata::v1::control::{PullImageProgressLayerPhase, PullImageProgressPhase, PullImageReply};
|
||||
use tokio_stream::StreamExt;
|
||||
use tonic::Streaming;
|
||||
|
||||
pub async fn pull_interactive_progress(
|
||||
mut stream: Streaming<PullImageReply>,
|
||||
) -> Result<PullImageReply> {
|
||||
let mut multi_progress: Option<(MultiProgress, HashMap<String, ProgressBar>)> = None;
|
||||
|
||||
while let Some(reply) = stream.next().await {
|
||||
let reply = reply?;
|
||||
|
||||
if reply.progress.is_none() && !reply.digest.is_empty() {
|
||||
return Ok(reply);
|
||||
}
|
||||
|
||||
let Some(oci) = reply.progress else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if multi_progress.is_none() {
|
||||
multi_progress = Some((MultiProgress::new(), HashMap::new()));
|
||||
}
|
||||
|
||||
let Some((multi_progress, progresses)) = multi_progress.as_mut() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
match oci.phase() {
|
||||
PullImageProgressPhase::Resolved
|
||||
| PullImageProgressPhase::ConfigAcquire
|
||||
| PullImageProgressPhase::LayerAcquire => {
|
||||
if progresses.is_empty() && !oci.layers.is_empty() {
|
||||
for layer in &oci.layers {
|
||||
let bar = ProgressBar::new(layer.total);
|
||||
bar.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
|
||||
progresses.insert(layer.id.clone(), bar.clone());
|
||||
multi_progress.add(bar);
|
||||
}
|
||||
}
|
||||
|
||||
for layer in oci.layers {
|
||||
let Some(progress) = progresses.get_mut(&layer.id) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let phase = match layer.phase() {
|
||||
PullImageProgressLayerPhase::Waiting => "waiting",
|
||||
PullImageProgressLayerPhase::Downloading => "downloading",
|
||||
PullImageProgressLayerPhase::Downloaded => "downloaded",
|
||||
PullImageProgressLayerPhase::Extracting => "extracting",
|
||||
PullImageProgressLayerPhase::Extracted => "extracted",
|
||||
_ => "unknown",
|
||||
};
|
||||
|
||||
let simple = if let Some((_, hash)) = layer.id.split_once(':') {
|
||||
hash
|
||||
} else {
|
||||
"unknown"
|
||||
};
|
||||
let simple = if simple.len() > 10 {
|
||||
&simple[0..10]
|
||||
} else {
|
||||
simple
|
||||
};
|
||||
let message = format!(
|
||||
"{:width$} {:phwidth$}",
|
||||
simple,
|
||||
phase,
|
||||
width = 10,
|
||||
phwidth = 11
|
||||
);
|
||||
|
||||
if message != progress.message() {
|
||||
progress.set_message(message);
|
||||
}
|
||||
|
||||
progress.update(|state| {
|
||||
state.set_len(layer.total);
|
||||
state.set_pos(layer.value);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
PullImageProgressPhase::Packing => {
|
||||
for (key, bar) in &mut *progresses {
|
||||
if key == "packing" {
|
||||
continue;
|
||||
}
|
||||
bar.finish_and_clear();
|
||||
multi_progress.remove(bar);
|
||||
}
|
||||
progresses.retain(|k, _| k == "packing");
|
||||
if progresses.is_empty() {
|
||||
let progress = ProgressBar::new(100);
|
||||
progress.set_message("packing ");
|
||||
progress.set_style(ProgressStyle::with_template("{msg} {bar}").unwrap());
|
||||
progresses.insert("packing".to_string(), progress);
|
||||
}
|
||||
let Some(progress) = progresses.get("packing") else {
|
||||
continue;
|
||||
};
|
||||
|
||||
progress.update(|state| {
|
||||
state.set_len(oci.total);
|
||||
state.set_pos(oci.value);
|
||||
});
|
||||
}
|
||||
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
Err(anyhow!("never received final reply for image pull"))
|
||||
}
|
@ -2,7 +2,7 @@
|
||||
name = "krata-daemon"
|
||||
description = "Daemon for the krata hypervisor."
|
||||
license.workspace = true
|
||||
version= "0.0.3"
|
||||
version.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
edition = "2021"
|
||||
@ -13,11 +13,13 @@ anyhow = { workspace = true }
|
||||
async-stream = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
circular-buffer = { workspace = true }
|
||||
clap = { workspace = true }
|
||||
env_logger = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
krata = { path = "../krata", version = "^0.0.3" }
|
||||
krata-runtime = { path = "../runtime", version = "^0.0.3" }
|
||||
krata = { path = "../krata", version = "^0.0.9" }
|
||||
krata-oci = { path = "../oci", version = "^0.0.9" }
|
||||
krata-runtime = { path = "../runtime", version = "^0.0.9" }
|
||||
log = { workspace = true }
|
||||
prost = { workspace = true }
|
||||
redb = { workspace = true }
|
||||
|
@ -3,7 +3,6 @@ use clap::Parser;
|
||||
use env_logger::Env;
|
||||
use krata::dial::ControlDialAddress;
|
||||
use kratad::Daemon;
|
||||
use kratart::Runtime;
|
||||
use log::LevelFilter;
|
||||
use std::{
|
||||
str::FromStr,
|
||||
@ -27,8 +26,8 @@ async fn main() -> Result<()> {
|
||||
|
||||
let args = DaemonCommand::parse();
|
||||
let addr = ControlDialAddress::from_str(&args.listen)?;
|
||||
let runtime = Runtime::new(args.store.clone()).await?;
|
||||
let mut daemon = Daemon::new(args.store.clone(), runtime).await?;
|
||||
|
||||
let mut daemon = Daemon::new(args.store.clone()).await?;
|
||||
daemon.listen(addr).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
153
crates/daemon/src/console.rs
Normal file
@ -0,0 +1,153 @@
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use anyhow::Result;
|
||||
use circular_buffer::CircularBuffer;
|
||||
use kratart::channel::ChannelService;
|
||||
use log::error;
|
||||
use tokio::{
|
||||
sync::{
|
||||
mpsc::{error::TrySendError, Receiver, Sender},
|
||||
Mutex,
|
||||
},
|
||||
task::JoinHandle,
|
||||
};
|
||||
|
||||
const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
|
||||
type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
|
||||
type ConsoleBuffer = Box<RawConsoleBuffer>;
|
||||
|
||||
type ListenerMap = Arc<Mutex<HashMap<u32, Vec<Sender<Vec<u8>>>>>>;
|
||||
type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DaemonConsoleHandle {
|
||||
listeners: ListenerMap,
|
||||
buffers: BufferMap,
|
||||
sender: Sender<(u32, Vec<u8>)>,
|
||||
task: Arc<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DaemonConsoleAttachHandle {
|
||||
pub initial: Vec<u8>,
|
||||
listeners: ListenerMap,
|
||||
sender: Sender<(u32, Vec<u8>)>,
|
||||
domid: u32,
|
||||
}
|
||||
|
||||
impl DaemonConsoleAttachHandle {
|
||||
pub async fn unsubscribe(&self) -> Result<()> {
|
||||
let mut guard = self.listeners.lock().await;
|
||||
let _ = guard.remove(&self.domid);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn send(&self, data: Vec<u8>) -> Result<()> {
|
||||
Ok(self.sender.send((self.domid, data)).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl DaemonConsoleHandle {
|
||||
pub async fn attach(
|
||||
&self,
|
||||
domid: u32,
|
||||
sender: Sender<Vec<u8>>,
|
||||
) -> Result<DaemonConsoleAttachHandle> {
|
||||
let buffers = self.buffers.lock().await;
|
||||
let buffer = buffers.get(&domid).map(|x| x.to_vec()).unwrap_or_default();
|
||||
drop(buffers);
|
||||
let mut listeners = self.listeners.lock().await;
|
||||
let senders = listeners.entry(domid).or_default();
|
||||
senders.push(sender);
|
||||
Ok(DaemonConsoleAttachHandle {
|
||||
initial: buffer,
|
||||
sender: self.sender.clone(),
|
||||
listeners: self.listeners.clone(),
|
||||
domid,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for DaemonConsoleHandle {
|
||||
fn drop(&mut self) {
|
||||
if Arc::strong_count(&self.task) <= 1 {
|
||||
self.task.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DaemonConsole {
|
||||
listeners: ListenerMap,
|
||||
buffers: BufferMap,
|
||||
receiver: Receiver<(u32, Option<Vec<u8>>)>,
|
||||
sender: Sender<(u32, Vec<u8>)>,
|
||||
task: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl DaemonConsole {
|
||||
pub async fn new() -> Result<DaemonConsole> {
|
||||
let (service, sender, receiver) =
|
||||
ChannelService::new("krata-console".to_string(), Some(0)).await?;
|
||||
let task = service.launch().await?;
|
||||
let listeners = Arc::new(Mutex::new(HashMap::new()));
|
||||
let buffers = Arc::new(Mutex::new(HashMap::new()));
|
||||
Ok(DaemonConsole {
|
||||
listeners,
|
||||
buffers,
|
||||
receiver,
|
||||
sender,
|
||||
task,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn launch(mut self) -> Result<DaemonConsoleHandle> {
|
||||
let listeners = self.listeners.clone();
|
||||
let buffers = self.buffers.clone();
|
||||
let sender = self.sender.clone();
|
||||
let task = tokio::task::spawn(async move {
|
||||
if let Err(error) = self.process().await {
|
||||
error!("failed to process console: {}", error);
|
||||
}
|
||||
});
|
||||
Ok(DaemonConsoleHandle {
|
||||
listeners,
|
||||
buffers,
|
||||
sender,
|
||||
task: Arc::new(task),
|
||||
})
|
||||
}
|
||||
|
||||
async fn process(&mut self) -> Result<()> {
|
||||
loop {
|
||||
let Some((domid, data)) = self.receiver.recv().await else {
|
||||
break;
|
||||
};
|
||||
|
||||
let mut buffers = self.buffers.lock().await;
|
||||
if let Some(data) = data {
|
||||
let buffer = buffers
|
||||
.entry(domid)
|
||||
.or_insert_with_key(|_| RawConsoleBuffer::boxed());
|
||||
buffer.extend_from_slice(&data);
|
||||
drop(buffers);
|
||||
let mut listeners = self.listeners.lock().await;
|
||||
if let Some(senders) = listeners.get_mut(&domid) {
|
||||
senders.retain(|sender| {
|
||||
!matches!(sender.try_send(data.to_vec()), Err(TrySendError::Closed(_)))
|
||||
});
|
||||
}
|
||||
} else {
|
||||
buffers.remove(&domid);
|
||||
let mut listeners = self.listeners.lock().await;
|
||||
listeners.remove(&domid);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for DaemonConsole {
|
||||
fn drop(&mut self) {
|
||||
self.task.abort();
|
||||
}
|
||||
}
|
@ -1,27 +1,40 @@
|
||||
use std::{io, pin::Pin, str::FromStr};
|
||||
|
||||
use async_stream::try_stream;
|
||||
use futures::Stream;
|
||||
use krata::v1::{
|
||||
common::{Guest, GuestState, GuestStatus},
|
||||
control::{
|
||||
control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
|
||||
CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
|
||||
ListGuestsReply, ListGuestsRequest, ResolveGuestReply, ResolveGuestRequest,
|
||||
WatchEventsReply, WatchEventsRequest,
|
||||
use krata::{
|
||||
idm::protocol::{
|
||||
idm_request::Request as IdmRequestType, idm_response::Response as IdmResponseType,
|
||||
IdmMetricsRequest,
|
||||
},
|
||||
v1::{
|
||||
common::{Guest, GuestOciImageFormat, GuestState, GuestStatus},
|
||||
control::{
|
||||
control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
|
||||
CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
|
||||
ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
|
||||
ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
|
||||
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
|
||||
},
|
||||
},
|
||||
};
|
||||
use kratart::Runtime;
|
||||
use krataoci::{
|
||||
name::ImageName,
|
||||
packer::{service::OciPackerService, OciImagePacked, OciPackedFormat},
|
||||
progress::{OciProgress, OciProgressContext},
|
||||
};
|
||||
use std::{pin::Pin, str::FromStr};
|
||||
use tokio::{
|
||||
io::{AsyncReadExt, AsyncWriteExt},
|
||||
select,
|
||||
sync::mpsc::Sender,
|
||||
sync::mpsc::{channel, Sender},
|
||||
task::JoinError,
|
||||
};
|
||||
use tokio_stream::StreamExt;
|
||||
use tonic::{Request, Response, Status, Streaming};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{db::GuestStore, event::DaemonEventContext};
|
||||
use crate::{
|
||||
console::DaemonConsoleHandle, db::GuestStore, event::DaemonEventContext, idm::DaemonIdmHandle,
|
||||
metrics::idm_metric_to_api, oci::convert_oci_progress,
|
||||
};
|
||||
|
||||
pub struct ApiError {
|
||||
message: String,
|
||||
@ -42,42 +55,59 @@ impl From<ApiError> for Status {
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RuntimeControlService {
|
||||
pub struct DaemonControlService {
|
||||
events: DaemonEventContext,
|
||||
runtime: Runtime,
|
||||
console: DaemonConsoleHandle,
|
||||
idm: DaemonIdmHandle,
|
||||
guests: GuestStore,
|
||||
guest_reconciler_notify: Sender<Uuid>,
|
||||
packer: OciPackerService,
|
||||
}
|
||||
|
||||
impl RuntimeControlService {
|
||||
impl DaemonControlService {
|
||||
pub fn new(
|
||||
events: DaemonEventContext,
|
||||
runtime: Runtime,
|
||||
console: DaemonConsoleHandle,
|
||||
idm: DaemonIdmHandle,
|
||||
guests: GuestStore,
|
||||
guest_reconciler_notify: Sender<Uuid>,
|
||||
packer: OciPackerService,
|
||||
) -> Self {
|
||||
Self {
|
||||
events,
|
||||
runtime,
|
||||
console,
|
||||
idm,
|
||||
guests,
|
||||
guest_reconciler_notify,
|
||||
packer,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum ConsoleDataSelect {
|
||||
Read(io::Result<usize>),
|
||||
Read(Option<Vec<u8>>),
|
||||
Write(Option<Result<ConsoleDataRequest, tonic::Status>>),
|
||||
}
|
||||
|
||||
enum PullImageSelect {
|
||||
Progress(usize),
|
||||
Completed(Result<Result<OciImagePacked, anyhow::Error>, JoinError>),
|
||||
}
|
||||
|
||||
#[tonic::async_trait]
|
||||
impl ControlService for RuntimeControlService {
|
||||
impl ControlService for DaemonControlService {
|
||||
type ConsoleDataStream =
|
||||
Pin<Box<dyn Stream<Item = Result<ConsoleDataReply, Status>> + Send + 'static>>;
|
||||
|
||||
type PullImageStream =
|
||||
Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
|
||||
|
||||
type WatchEventsStream =
|
||||
Pin<Box<dyn Stream<Item = Result<WatchEventsReply, Status>> + Send + 'static>>;
|
||||
|
||||
type SnoopIdmStream =
|
||||
Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;
|
||||
|
||||
async fn create_guest(
|
||||
&self,
|
||||
request: Request<CreateGuestRequest>,
|
||||
@ -200,27 +230,64 @@ impl ControlService for RuntimeControlService {
|
||||
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?;
|
||||
let mut console = self.runtime.console(uuid).await.map_err(ApiError::from)?;
|
||||
let guest = self
|
||||
.guests
|
||||
.read(uuid)
|
||||
.await
|
||||
.map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?
|
||||
.ok_or_else(|| ApiError {
|
||||
message: "guest did not exist in the database".to_string(),
|
||||
})?;
|
||||
|
||||
let Some(ref state) = guest.state else {
|
||||
return Err(ApiError {
|
||||
message: "guest did not have state".to_string(),
|
||||
}
|
||||
.into());
|
||||
};
|
||||
|
||||
let domid = state.domid;
|
||||
if domid == 0 {
|
||||
return Err(ApiError {
|
||||
message: "invalid domid on the guest".to_string(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
let (sender, mut receiver) = channel(100);
|
||||
let console = self
|
||||
.console
|
||||
.attach(domid, sender)
|
||||
.await
|
||||
.map_err(|error| ApiError {
|
||||
message: format!("failed to attach to console: {}", error),
|
||||
})?;
|
||||
|
||||
let output = try_stream! {
|
||||
let mut buffer: Vec<u8> = vec![0u8; 256];
|
||||
yield ConsoleDataReply { data: console.initial.clone(), };
|
||||
loop {
|
||||
let what = select! {
|
||||
x = console.read_handle.read(&mut buffer) => ConsoleDataSelect::Read(x),
|
||||
x = receiver.recv() => ConsoleDataSelect::Read(x),
|
||||
x = input.next() => ConsoleDataSelect::Write(x),
|
||||
};
|
||||
|
||||
match what {
|
||||
ConsoleDataSelect::Read(result) => {
|
||||
let size = result?;
|
||||
let data = buffer[0..size].to_vec();
|
||||
ConsoleDataSelect::Read(Some(data)) => {
|
||||
yield ConsoleDataReply { data, };
|
||||
},
|
||||
|
||||
ConsoleDataSelect::Read(None) => {
|
||||
break;
|
||||
}
|
||||
|
||||
ConsoleDataSelect::Write(Some(request)) => {
|
||||
let request = request?;
|
||||
if !request.data.is_empty() {
|
||||
console.write_handle.write_all(&request.data).await?;
|
||||
console.send(request.data).await.map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?;
|
||||
}
|
||||
},
|
||||
|
||||
@ -234,6 +301,123 @@ impl ControlService for RuntimeControlService {
|
||||
Ok(Response::new(Box::pin(output) as Self::ConsoleDataStream))
|
||||
}
|
||||
|
||||
async fn read_guest_metrics(
|
||||
&self,
|
||||
request: Request<ReadGuestMetricsRequest>,
|
||||
) -> Result<Response<ReadGuestMetricsReply>, Status> {
|
||||
let request = request.into_inner();
|
||||
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?;
|
||||
let guest = self
|
||||
.guests
|
||||
.read(uuid)
|
||||
.await
|
||||
.map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?
|
||||
.ok_or_else(|| ApiError {
|
||||
message: "guest did not exist in the database".to_string(),
|
||||
})?;
|
||||
|
||||
let Some(ref state) = guest.state else {
|
||||
return Err(ApiError {
|
||||
message: "guest did not have state".to_string(),
|
||||
}
|
||||
.into());
|
||||
};
|
||||
|
||||
let domid = state.domid;
|
||||
if domid == 0 {
|
||||
return Err(ApiError {
|
||||
message: "invalid domid on the guest".to_string(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
let client = self.idm.client(domid).await.map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?;
|
||||
|
||||
let response = client
|
||||
.send(IdmRequestType::Metrics(IdmMetricsRequest {}))
|
||||
.await
|
||||
.map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?;
|
||||
|
||||
let mut reply = ReadGuestMetricsReply::default();
|
||||
if let IdmResponseType::Metrics(metrics) = response {
|
||||
reply.root = metrics.root.map(idm_metric_to_api);
|
||||
}
|
||||
Ok(Response::new(reply))
|
||||
}
|
||||
|
||||
async fn pull_image(
|
||||
&self,
|
||||
request: Request<PullImageRequest>,
|
||||
) -> Result<Response<Self::PullImageStream>, Status> {
|
||||
let request = request.into_inner();
|
||||
let name = ImageName::parse(&request.image).map_err(|err| ApiError {
|
||||
message: err.to_string(),
|
||||
})?;
|
||||
let format = match request.format() {
|
||||
GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
|
||||
GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
|
||||
GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
|
||||
};
|
||||
let (sender, mut receiver) = channel::<OciProgress>(100);
|
||||
let context = OciProgressContext::new(sender);
|
||||
|
||||
let our_packer = self.packer.clone();
|
||||
|
||||
let output = try_stream! {
|
||||
let mut task = tokio::task::spawn(async move {
|
||||
our_packer.request(name, format, context).await
|
||||
});
|
||||
loop {
|
||||
let mut progresses = Vec::new();
|
||||
let what = select! {
|
||||
x = receiver.recv_many(&mut progresses, 10) => PullImageSelect::Progress(x),
|
||||
x = &mut task => PullImageSelect::Completed(x),
|
||||
};
|
||||
match what {
|
||||
PullImageSelect::Progress(count) => {
|
||||
if count > 0 {
|
||||
let progress = progresses.remove(progresses.len() - 1);
|
||||
let reply = PullImageReply {
|
||||
progress: Some(convert_oci_progress(progress)),
|
||||
digest: String::new(),
|
||||
format: GuestOciImageFormat::Unknown.into(),
|
||||
};
|
||||
yield reply;
|
||||
}
|
||||
},
|
||||
|
||||
PullImageSelect::Completed(result) => {
|
||||
let result = result.map_err(|err| ApiError {
|
||||
message: err.to_string(),
|
||||
})?;
|
||||
let packed = result.map_err(|err| ApiError {
|
||||
message: err.to_string(),
|
||||
})?;
|
||||
let reply = PullImageReply {
|
||||
progress: None,
|
||||
digest: packed.digest,
|
||||
format: match packed.format {
|
||||
OciPackedFormat::Squashfs => GuestOciImageFormat::Squashfs.into(),
|
||||
OciPackedFormat::Erofs => GuestOciImageFormat::Erofs.into(),
|
||||
},
|
||||
};
|
||||
yield reply;
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(Response::new(Box::pin(output) as Self::PullImageStream))
|
||||
}
|
||||
|
||||
async fn watch_events(
|
||||
&self,
|
||||
request: Request<WatchEventsRequest>,
|
||||
@ -247,4 +431,18 @@ impl ControlService for RuntimeControlService {
|
||||
};
|
||||
Ok(Response::new(Box::pin(output) as Self::WatchEventsStream))
|
||||
}
|
||||
|
||||
async fn snoop_idm(
|
||||
&self,
|
||||
request: Request<SnoopIdmRequest>,
|
||||
) -> Result<Response<Self::SnoopIdmStream>, Status> {
|
||||
let _ = request.into_inner();
|
||||
let mut messages = self.idm.snoop();
|
||||
let output = try_stream! {
|
||||
while let Ok(event) = messages.recv().await {
|
||||
yield SnoopIdmReply { from: event.from, to: event.to, packet: Some(event.packet) };
|
||||
}
|
||||
};
|
||||
Ok(Response::new(Box::pin(output) as Self::SnoopIdmStream))
|
||||
}
|
||||
}
|
||||
|
@ -6,10 +6,10 @@ use std::{
|
||||
|
||||
use anyhow::Result;
|
||||
use krata::{
|
||||
idm::protocol::{idm_event::Event, IdmPacket},
|
||||
idm::protocol::{idm_event::Event, IdmEvent},
|
||||
v1::common::{GuestExitInfo, GuestState, GuestStatus},
|
||||
};
|
||||
use log::error;
|
||||
use log::{error, warn};
|
||||
use tokio::{
|
||||
select,
|
||||
sync::{
|
||||
@ -21,15 +21,12 @@ use tokio::{
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
db::GuestStore,
|
||||
idm::{DaemonIdmHandle, DaemonIdmSubscribeHandle},
|
||||
};
|
||||
use crate::{db::GuestStore, idm::DaemonIdmHandle};
|
||||
|
||||
pub type DaemonEvent = krata::v1::control::watch_events_reply::Event;
|
||||
|
||||
const EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
|
||||
const IDM_CHANNEL_QUEUE_LEN: usize = 1000;
|
||||
const IDM_EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DaemonEventContext {
|
||||
@ -52,9 +49,9 @@ pub struct DaemonEventGenerator {
|
||||
guest_reconciler_notify: Sender<Uuid>,
|
||||
feed: broadcast::Receiver<DaemonEvent>,
|
||||
idm: DaemonIdmHandle,
|
||||
idms: HashMap<u32, (Uuid, DaemonIdmSubscribeHandle)>,
|
||||
idm_sender: Sender<(u32, IdmPacket)>,
|
||||
idm_receiver: Receiver<(u32, IdmPacket)>,
|
||||
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
|
||||
idm_sender: Sender<(u32, IdmEvent)>,
|
||||
idm_receiver: Receiver<(u32, IdmEvent)>,
|
||||
_event_sender: broadcast::Sender<DaemonEvent>,
|
||||
}
|
||||
|
||||
@ -65,7 +62,7 @@ impl DaemonEventGenerator {
|
||||
idm: DaemonIdmHandle,
|
||||
) -> Result<(DaemonEventContext, DaemonEventGenerator)> {
|
||||
let (sender, _) = broadcast::channel(EVENT_CHANNEL_QUEUE_LEN);
|
||||
let (idm_sender, idm_receiver) = channel(IDM_CHANNEL_QUEUE_LEN);
|
||||
let (idm_sender, idm_receiver) = channel(IDM_EVENT_CHANNEL_QUEUE_LEN);
|
||||
let generator = DaemonEventGenerator {
|
||||
guests,
|
||||
guest_reconciler_notify,
|
||||
@ -81,46 +78,55 @@ impl DaemonEventGenerator {
|
||||
}
|
||||
|
||||
async fn handle_feed_event(&mut self, event: &DaemonEvent) -> Result<()> {
|
||||
match event {
|
||||
DaemonEvent::GuestChanged(changed) => {
|
||||
let Some(ref guest) = changed.guest else {
|
||||
return Ok(());
|
||||
};
|
||||
let DaemonEvent::GuestChanged(changed) = event;
|
||||
let Some(ref guest) = changed.guest else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let Some(ref state) = guest.state else {
|
||||
return Ok(());
|
||||
};
|
||||
let Some(ref state) = guest.state else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let status = state.status();
|
||||
let id = Uuid::from_str(&guest.id)?;
|
||||
let domid = state.domid;
|
||||
match status {
|
||||
GuestStatus::Started => {
|
||||
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
||||
let subscribe =
|
||||
self.idm.subscribe(domid, self.idm_sender.clone()).await?;
|
||||
e.insert((id, subscribe));
|
||||
let status = state.status();
|
||||
let id = Uuid::from_str(&guest.id)?;
|
||||
let domid = state.domid;
|
||||
match status {
|
||||
GuestStatus::Started => {
|
||||
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
||||
let client = self.idm.client(domid).await?;
|
||||
let mut receiver = client.subscribe().await?;
|
||||
let sender = self.idm_sender.clone();
|
||||
let task = tokio::task::spawn(async move {
|
||||
loop {
|
||||
let Ok(event) = receiver.recv().await else {
|
||||
break;
|
||||
};
|
||||
|
||||
if let Err(error) = sender.send((domid, event)).await {
|
||||
warn!("unable to deliver idm event: {}", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GuestStatus::Destroyed => {
|
||||
if let Some((_, handle)) = self.idms.remove(&domid) {
|
||||
handle.unsubscribe().await?;
|
||||
}
|
||||
}
|
||||
|
||||
_ => {}
|
||||
});
|
||||
e.insert((id, task));
|
||||
}
|
||||
}
|
||||
|
||||
GuestStatus::Destroyed => {
|
||||
if let Some((_, handle)) = self.idms.remove(&domid) {
|
||||
handle.abort();
|
||||
}
|
||||
}
|
||||
|
||||
_ => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_idm_packet(&mut self, id: Uuid, packet: IdmPacket) -> Result<()> {
|
||||
if let Some(Event::Exit(exit)) = packet.event.and_then(|x| x.event) {
|
||||
self.handle_exit_code(id, exit.code).await?;
|
||||
async fn handle_idm_event(&mut self, id: Uuid, event: IdmEvent) -> Result<()> {
|
||||
match event.event {
|
||||
Some(Event::Exit(exit)) => self.handle_exit_code(id, exit.code).await,
|
||||
None => Ok(()),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
||||
@ -142,9 +148,9 @@ impl DaemonEventGenerator {
|
||||
async fn evaluate(&mut self) -> Result<()> {
|
||||
select! {
|
||||
x = self.idm_receiver.recv() => match x {
|
||||
Some((domid, packet)) => {
|
||||
Some((domid, event)) => {
|
||||
if let Some((id, _)) = self.idms.get(&domid) {
|
||||
self.handle_idm_packet(*id, packet).await?;
|
||||
self.handle_idm_event(*id, event).await?;
|
||||
}
|
||||
Ok(())
|
||||
},
|
||||
@ -159,7 +165,7 @@ impl DaemonEventGenerator {
|
||||
Err(error) => {
|
||||
Err(error.into())
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,53 +1,46 @@
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use std::{
|
||||
collections::{hash_map::Entry, HashMap},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
use anyhow::{anyhow, Result};
|
||||
use bytes::{Buf, BytesMut};
|
||||
use krata::idm::protocol::IdmPacket;
|
||||
use krata::idm::{
|
||||
client::{IdmBackend, IdmClient},
|
||||
protocol::IdmPacket,
|
||||
};
|
||||
use kratart::channel::ChannelService;
|
||||
use log::{error, warn};
|
||||
use prost::Message;
|
||||
use tokio::{
|
||||
select,
|
||||
sync::{
|
||||
mpsc::{Receiver, Sender},
|
||||
broadcast,
|
||||
mpsc::{channel, Receiver, Sender},
|
||||
Mutex,
|
||||
},
|
||||
task::JoinHandle,
|
||||
};
|
||||
|
||||
type ListenerMap = Arc<Mutex<HashMap<u32, Sender<(u32, IdmPacket)>>>>;
|
||||
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmPacket>>>>;
|
||||
type ClientMap = Arc<Mutex<HashMap<u32, IdmClient>>>;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DaemonIdmHandle {
|
||||
listeners: ListenerMap,
|
||||
clients: ClientMap,
|
||||
feeds: BackendFeedMap,
|
||||
tx_sender: Sender<(u32, IdmPacket)>,
|
||||
task: Arc<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DaemonIdmSubscribeHandle {
|
||||
domid: u32,
|
||||
listeners: ListenerMap,
|
||||
}
|
||||
|
||||
impl DaemonIdmSubscribeHandle {
|
||||
pub async fn unsubscribe(&self) -> Result<()> {
|
||||
let mut guard = self.listeners.lock().await;
|
||||
let _ = guard.remove(&self.domid);
|
||||
Ok(())
|
||||
}
|
||||
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
|
||||
}
|
||||
|
||||
impl DaemonIdmHandle {
|
||||
pub async fn subscribe(
|
||||
&self,
|
||||
domid: u32,
|
||||
sender: Sender<(u32, IdmPacket)>,
|
||||
) -> Result<DaemonIdmSubscribeHandle> {
|
||||
let mut guard = self.listeners.lock().await;
|
||||
guard.insert(domid, sender);
|
||||
Ok(DaemonIdmSubscribeHandle {
|
||||
domid,
|
||||
listeners: self.listeners.clone(),
|
||||
})
|
||||
pub fn snoop(&self) -> broadcast::Receiver<DaemonIdmSnoopPacket> {
|
||||
self.snoop_sender.subscribe()
|
||||
}
|
||||
|
||||
pub async fn client(&self, domid: u32) -> Result<IdmClient> {
|
||||
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await
|
||||
}
|
||||
}
|
||||
|
||||
@ -59,70 +52,137 @@ impl Drop for DaemonIdmHandle {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DaemonIdmSnoopPacket {
|
||||
pub from: u32,
|
||||
pub to: u32,
|
||||
pub packet: IdmPacket,
|
||||
}
|
||||
|
||||
pub struct DaemonIdm {
|
||||
listeners: ListenerMap,
|
||||
receiver: Receiver<(u32, Vec<u8>)>,
|
||||
clients: ClientMap,
|
||||
feeds: BackendFeedMap,
|
||||
tx_sender: Sender<(u32, IdmPacket)>,
|
||||
tx_raw_sender: Sender<(u32, Vec<u8>)>,
|
||||
tx_receiver: Receiver<(u32, IdmPacket)>,
|
||||
rx_receiver: Receiver<(u32, Option<Vec<u8>>)>,
|
||||
snoop_sender: broadcast::Sender<DaemonIdmSnoopPacket>,
|
||||
task: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl DaemonIdm {
|
||||
pub async fn new() -> Result<DaemonIdm> {
|
||||
let (service, receiver) = ChannelService::new("krata-channel".to_string()).await?;
|
||||
let (service, tx_raw_sender, rx_receiver) =
|
||||
ChannelService::new("krata-channel".to_string(), None).await?;
|
||||
let (tx_sender, tx_receiver) = channel(100);
|
||||
let (snoop_sender, _) = broadcast::channel(100);
|
||||
let task = service.launch().await?;
|
||||
let listeners = Arc::new(Mutex::new(HashMap::new()));
|
||||
let clients = Arc::new(Mutex::new(HashMap::new()));
|
||||
let feeds = Arc::new(Mutex::new(HashMap::new()));
|
||||
Ok(DaemonIdm {
|
||||
receiver,
|
||||
rx_receiver,
|
||||
tx_receiver,
|
||||
tx_sender,
|
||||
tx_raw_sender,
|
||||
snoop_sender,
|
||||
task,
|
||||
listeners,
|
||||
clients,
|
||||
feeds,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn launch(mut self) -> Result<DaemonIdmHandle> {
|
||||
let listeners = self.listeners.clone();
|
||||
let clients = self.clients.clone();
|
||||
let feeds = self.feeds.clone();
|
||||
let tx_sender = self.tx_sender.clone();
|
||||
let snoop_sender = self.snoop_sender.clone();
|
||||
let task = tokio::task::spawn(async move {
|
||||
let mut buffers: HashMap<u32, BytesMut> = HashMap::new();
|
||||
if let Err(error) = self.process(&mut buffers).await {
|
||||
|
||||
while let Err(error) = self.process(&mut buffers).await {
|
||||
error!("failed to process idm: {}", error);
|
||||
}
|
||||
});
|
||||
Ok(DaemonIdmHandle {
|
||||
listeners,
|
||||
clients,
|
||||
feeds,
|
||||
tx_sender,
|
||||
snoop_sender,
|
||||
task: Arc::new(task),
|
||||
})
|
||||
}
|
||||
|
||||
async fn process(&mut self, buffers: &mut HashMap<u32, BytesMut>) -> Result<()> {
|
||||
loop {
|
||||
let Some((domid, data)) = self.receiver.recv().await else {
|
||||
break;
|
||||
};
|
||||
select! {
|
||||
x = self.rx_receiver.recv() => match x {
|
||||
Some((domid, data)) => {
|
||||
if let Some(data) = data {
|
||||
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
|
||||
buffer.extend_from_slice(&data);
|
||||
if buffer.len() < 6 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
|
||||
buffer.extend_from_slice(&data);
|
||||
if buffer.len() < 2 {
|
||||
continue;
|
||||
}
|
||||
let size = (buffer[0] as u16 | (buffer[1] as u16) << 8) as usize;
|
||||
let needed = size + 2;
|
||||
if buffer.len() < needed {
|
||||
continue;
|
||||
}
|
||||
let mut packet = buffer.split_to(needed);
|
||||
packet.advance(2);
|
||||
match IdmPacket::decode(packet) {
|
||||
Ok(packet) => {
|
||||
let guard = self.listeners.lock().await;
|
||||
if let Some(sender) = guard.get(&domid) {
|
||||
if let Err(error) = sender.try_send((domid, packet)) {
|
||||
warn!("dropped idm packet from domain {}: {}", domid, error);
|
||||
if buffer[0] != 0xff || buffer[1] != 0xff {
|
||||
buffer.clear();
|
||||
continue;
|
||||
}
|
||||
|
||||
let size = (buffer[2] as u32 | (buffer[3] as u32) << 8 | (buffer[4] as u32) << 16 | (buffer[5] as u32) << 24) as usize;
|
||||
let needed = size + 6;
|
||||
if buffer.len() < needed {
|
||||
continue;
|
||||
}
|
||||
let mut packet = buffer.split_to(needed);
|
||||
packet.advance(6);
|
||||
match IdmPacket::decode(packet) {
|
||||
Ok(packet) => {
|
||||
let _ = client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await?;
|
||||
let guard = self.feeds.lock().await;
|
||||
if let Some(feed) = guard.get(&domid) {
|
||||
let _ = feed.try_send(packet.clone());
|
||||
}
|
||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: domid, to: 0, packet });
|
||||
}
|
||||
|
||||
Err(packet) => {
|
||||
warn!("received invalid packet from domain {}: {}", domid, packet);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut clients = self.clients.lock().await;
|
||||
let mut feeds = self.feeds.lock().await;
|
||||
clients.remove(&domid);
|
||||
feeds.remove(&domid);
|
||||
}
|
||||
},
|
||||
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
},
|
||||
x = self.tx_receiver.recv() => match x {
|
||||
Some((domid, packet)) => {
|
||||
let data = packet.encode_to_vec();
|
||||
let mut buffer = vec![0u8; 6];
|
||||
let length = data.len() as u32;
|
||||
buffer[0] = 0xff;
|
||||
buffer[1] = 0xff;
|
||||
buffer[2] = length as u8;
|
||||
buffer[3] = (length >> 8) as u8;
|
||||
buffer[4] = (length >> 16) as u8;
|
||||
buffer[5] = (length >> 24) as u8;
|
||||
buffer.extend_from_slice(&data);
|
||||
self.tx_raw_sender.send((domid, buffer)).await?;
|
||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: 0, to: domid, packet });
|
||||
},
|
||||
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Err(packet) => {
|
||||
warn!("received invalid packet from domain {}: {}", domid, packet);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -133,3 +193,50 @@ impl Drop for DaemonIdm {
|
||||
self.task.abort();
|
||||
}
|
||||
}
|
||||
|
||||
async fn client_or_create(
|
||||
domid: u32,
|
||||
tx_sender: &Sender<(u32, IdmPacket)>,
|
||||
clients: &ClientMap,
|
||||
feeds: &BackendFeedMap,
|
||||
) -> Result<IdmClient> {
|
||||
let mut clients = clients.lock().await;
|
||||
let mut feeds = feeds.lock().await;
|
||||
match clients.entry(domid) {
|
||||
Entry::Occupied(entry) => Ok(entry.get().clone()),
|
||||
Entry::Vacant(entry) => {
|
||||
let (rx_sender, rx_receiver) = channel(100);
|
||||
feeds.insert(domid, rx_sender);
|
||||
let backend = IdmDaemonBackend {
|
||||
domid,
|
||||
rx_receiver,
|
||||
tx_sender: tx_sender.clone(),
|
||||
};
|
||||
let client = IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await?;
|
||||
entry.insert(client.clone());
|
||||
Ok(client)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct IdmDaemonBackend {
|
||||
domid: u32,
|
||||
rx_receiver: Receiver<IdmPacket>,
|
||||
tx_sender: Sender<(u32, IdmPacket)>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl IdmBackend for IdmDaemonBackend {
|
||||
async fn recv(&mut self) -> Result<IdmPacket> {
|
||||
if let Some(packet) = self.rx_receiver.recv().await {
|
||||
Ok(packet)
|
||||
} else {
|
||||
Err(anyhow!("idm receive channel closed"))
|
||||
}
|
||||
}
|
||||
|
||||
async fn send(&mut self, packet: IdmPacket) -> Result<()> {
|
||||
self.tx_sender.send((self.domid, packet)).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -1,15 +1,18 @@
|
||||
use std::{net::SocketAddr, path::PathBuf, str::FromStr};
|
||||
|
||||
use anyhow::Result;
|
||||
use control::RuntimeControlService;
|
||||
use console::{DaemonConsole, DaemonConsoleHandle};
|
||||
use control::DaemonControlService;
|
||||
use db::GuestStore;
|
||||
use event::{DaemonEventContext, DaemonEventGenerator};
|
||||
use idm::{DaemonIdm, DaemonIdmHandle};
|
||||
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
||||
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
||||
use kratart::Runtime;
|
||||
use log::info;
|
||||
use reconcile::guest::GuestReconciler;
|
||||
use tokio::{
|
||||
fs,
|
||||
net::UnixListener,
|
||||
sync::mpsc::{channel, Sender},
|
||||
task::JoinHandle,
|
||||
@ -18,60 +21,83 @@ use tokio_stream::wrappers::UnixListenerStream;
|
||||
use tonic::transport::{Identity, Server, ServerTlsConfig};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub mod console;
|
||||
pub mod control;
|
||||
pub mod db;
|
||||
pub mod event;
|
||||
pub mod idm;
|
||||
pub mod metrics;
|
||||
pub mod oci;
|
||||
pub mod reconcile;
|
||||
|
||||
pub struct Daemon {
|
||||
store: String,
|
||||
runtime: Runtime,
|
||||
guests: GuestStore,
|
||||
events: DaemonEventContext,
|
||||
guest_reconciler_task: JoinHandle<()>,
|
||||
guest_reconciler_notify: Sender<Uuid>,
|
||||
generator_task: JoinHandle<()>,
|
||||
_idm: DaemonIdmHandle,
|
||||
idm: DaemonIdmHandle,
|
||||
console: DaemonConsoleHandle,
|
||||
packer: OciPackerService,
|
||||
}
|
||||
|
||||
const GUEST_RECONCILER_QUEUE_LEN: usize = 1000;
|
||||
|
||||
impl Daemon {
|
||||
pub async fn new(store: String, runtime: Runtime) -> Result<Self> {
|
||||
pub async fn new(store: String) -> Result<Self> {
|
||||
let mut image_cache_dir = PathBuf::from(store.clone());
|
||||
image_cache_dir.push("cache");
|
||||
image_cache_dir.push("image");
|
||||
fs::create_dir_all(&image_cache_dir).await?;
|
||||
|
||||
let packer = OciPackerService::new(None, &image_cache_dir, OciPlatform::current())?;
|
||||
|
||||
let runtime = Runtime::new(store.clone()).await?;
|
||||
let guests_db_path = format!("{}/guests.db", store);
|
||||
let guests = GuestStore::open(&PathBuf::from(guests_db_path))?;
|
||||
let (guest_reconciler_notify, guest_reconciler_receiver) =
|
||||
channel::<Uuid>(GUEST_RECONCILER_QUEUE_LEN);
|
||||
let idm = DaemonIdm::new().await?;
|
||||
let idm = idm.launch().await?;
|
||||
let console = DaemonConsole::new().await?;
|
||||
let console = console.launch().await?;
|
||||
let (events, generator) =
|
||||
DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
|
||||
.await?;
|
||||
let runtime_for_reconciler = runtime.dupe().await?;
|
||||
let guest_reconciler =
|
||||
GuestReconciler::new(guests.clone(), events.clone(), runtime_for_reconciler)?;
|
||||
let guest_reconciler = GuestReconciler::new(
|
||||
guests.clone(),
|
||||
events.clone(),
|
||||
runtime_for_reconciler,
|
||||
packer.clone(),
|
||||
guest_reconciler_notify.clone(),
|
||||
)?;
|
||||
|
||||
let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
|
||||
let generator_task = generator.launch().await?;
|
||||
|
||||
Ok(Self {
|
||||
store,
|
||||
runtime,
|
||||
guests,
|
||||
events,
|
||||
guest_reconciler_task,
|
||||
guest_reconciler_notify,
|
||||
generator_task,
|
||||
_idm: idm,
|
||||
idm,
|
||||
console,
|
||||
packer,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
|
||||
let control_service = RuntimeControlService::new(
|
||||
let control_service = DaemonControlService::new(
|
||||
self.events.clone(),
|
||||
self.runtime.clone(),
|
||||
self.console.clone(),
|
||||
self.idm.clone(),
|
||||
self.guests.clone(),
|
||||
self.guest_reconciler_notify.clone(),
|
||||
self.packer.clone(),
|
||||
);
|
||||
|
||||
let mut server = Server::builder();
|
||||
@ -97,7 +123,7 @@ impl Daemon {
|
||||
ControlDialAddress::UnixSocket { path } => {
|
||||
let path = PathBuf::from(path);
|
||||
if path.exists() {
|
||||
tokio::fs::remove_file(&path).await?;
|
||||
fs::remove_file(&path).await?;
|
||||
}
|
||||
let listener = UnixListener::bind(path)?;
|
||||
let stream = UnixListenerStream::new(listener);
|
||||
|
27
crates/daemon/src/metrics.rs
Normal file
@ -0,0 +1,27 @@
use krata::{
    idm::protocol::{IdmMetricFormat, IdmMetricNode},
    v1::common::{GuestMetricFormat, GuestMetricNode},
};

fn idm_metric_format_to_api(format: IdmMetricFormat) -> GuestMetricFormat {
    match format {
        IdmMetricFormat::Unknown => GuestMetricFormat::Unknown,
        IdmMetricFormat::Bytes => GuestMetricFormat::Bytes,
        IdmMetricFormat::Integer => GuestMetricFormat::Integer,
        IdmMetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
    }
}

pub fn idm_metric_to_api(node: IdmMetricNode) -> GuestMetricNode {
    let format = node.format();
    GuestMetricNode {
        name: node.name,
        value: node.value,
        format: idm_metric_format_to_api(format).into(),
        children: node
            .children
            .into_iter()
            .map(idm_metric_to_api)
            .collect::<Vec<_>>(),
    }
}
41
crates/daemon/src/oci.rs
Normal file
@ -0,0 +1,41 @@
use krata::v1::control::{
    PullImageProgress, PullImageProgressLayer, PullImageProgressLayerPhase, PullImageProgressPhase,
};
use krataoci::progress::{OciProgress, OciProgressLayer, OciProgressLayerPhase, OciProgressPhase};

fn convert_oci_layer_progress(layer: OciProgressLayer) -> PullImageProgressLayer {
    PullImageProgressLayer {
        id: layer.id,
        phase: match layer.phase {
            OciProgressLayerPhase::Waiting => PullImageProgressLayerPhase::Waiting,
            OciProgressLayerPhase::Downloading => PullImageProgressLayerPhase::Downloading,
            OciProgressLayerPhase::Downloaded => PullImageProgressLayerPhase::Downloaded,
            OciProgressLayerPhase::Extracting => PullImageProgressLayerPhase::Extracting,
            OciProgressLayerPhase::Extracted => PullImageProgressLayerPhase::Extracted,
        }
        .into(),
        value: layer.value,
        total: layer.total,
    }
}

pub fn convert_oci_progress(oci: OciProgress) -> PullImageProgress {
    PullImageProgress {
        phase: match oci.phase {
            OciProgressPhase::Resolving => PullImageProgressPhase::Resolving,
            OciProgressPhase::Resolved => PullImageProgressPhase::Resolved,
            OciProgressPhase::ConfigAcquire => PullImageProgressPhase::ConfigAcquire,
            OciProgressPhase::LayerAcquire => PullImageProgressPhase::LayerAcquire,
            OciProgressPhase::Packing => PullImageProgressPhase::Packing,
            OciProgressPhase::Complete => PullImageProgressPhase::Complete,
        }
        .into(),
        layers: oci
            .layers
            .into_values()
            .map(convert_oci_layer_progress)
            .collect::<Vec<_>>(),
        value: oci.value,
        total: oci.total,
    }
}
@ -1,16 +1,30 @@
|
||||
use std::{collections::HashMap, time::Duration};
|
||||
use std::{
|
||||
collections::{hash_map::Entry, HashMap},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use krata::launchcfg::LaunchPackedFormat;
|
||||
use krata::v1::{
|
||||
common::{
|
||||
guest_image_spec::Image, Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState,
|
||||
GuestState, GuestStatus,
|
||||
GuestOciImageFormat, GuestState, GuestStatus,
|
||||
},
|
||||
control::GuestChangedEvent,
|
||||
};
|
||||
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
||||
use kratart::{launch::GuestLaunchRequest, GuestInfo, Runtime};
|
||||
use log::{error, info, trace, warn};
|
||||
use tokio::{select, sync::mpsc::Receiver, task::JoinHandle, time::sleep};
|
||||
use tokio::{
|
||||
select,
|
||||
sync::{
|
||||
mpsc::{channel, Receiver, Sender},
|
||||
Mutex, RwLock,
|
||||
},
|
||||
task::JoinHandle,
|
||||
time::sleep,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
@ -18,18 +32,52 @@ use crate::{
|
||||
event::{DaemonEvent, DaemonEventContext},
|
||||
};
|
||||
|
||||
const PARALLEL_LIMIT: u32 = 5;
|
||||
|
||||
#[derive(Debug)]
|
||||
enum GuestReconcilerResult {
|
||||
Unchanged,
|
||||
Changed { rerun: bool },
|
||||
}
|
||||
|
||||
struct GuestReconcilerEntry {
|
||||
task: JoinHandle<()>,
|
||||
sender: Sender<()>,
|
||||
}
|
||||
|
||||
impl Drop for GuestReconcilerEntry {
|
||||
fn drop(&mut self) {
|
||||
self.task.abort();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct GuestReconciler {
|
||||
guests: GuestStore,
|
||||
events: DaemonEventContext,
|
||||
runtime: Runtime,
|
||||
packer: OciPackerService,
|
||||
tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
|
||||
guest_reconciler_notify: Sender<Uuid>,
|
||||
reconcile_lock: Arc<RwLock<()>>,
|
||||
}
|
||||
|
||||
impl GuestReconciler {
|
||||
pub fn new(guests: GuestStore, events: DaemonEventContext, runtime: Runtime) -> Result<Self> {
|
||||
pub fn new(
|
||||
guests: GuestStore,
|
||||
events: DaemonEventContext,
|
||||
runtime: Runtime,
|
||||
packer: OciPackerService,
|
||||
guest_reconciler_notify: Sender<Uuid>,
|
||||
) -> Result<Self> {
|
||||
Ok(Self {
|
||||
guests,
|
||||
events,
|
||||
runtime,
|
||||
packer,
|
||||
tasks: Arc::new(Mutex::new(HashMap::new())),
|
||||
guest_reconciler_notify,
|
||||
reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
||||
})
|
||||
}
|
||||
|
||||
@ -47,8 +95,15 @@ impl GuestReconciler {
|
||||
},
|
||||
|
||||
Some(uuid) => {
|
||||
if let Err(error) = self.reconcile(uuid).await {
|
||||
error!("failed to reconcile guest {}: {}", uuid, error);
|
||||
if let Err(error) = self.launch_task_if_needed(uuid).await {
|
||||
error!("failed to start guest reconciler task {}: {}", uuid, error);
|
||||
}
|
||||
|
||||
let map = self.tasks.lock().await;
|
||||
if let Some(entry) = map.get(&uuid) {
|
||||
if let Err(error) = entry.sender.send(()).await {
|
||||
error!("failed to notify guest reconciler task {}: {}", uuid, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -64,6 +119,7 @@ impl GuestReconciler {
|
||||
}
|
||||
|
||||
pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
|
||||
let _permit = self.reconcile_lock.write().await;
|
||||
trace!("reconciling runtime");
|
||||
let runtime_guests = self.runtime.list().await?;
|
||||
let stored_guests = self.guests.list().await?;
|
||||
@ -96,21 +152,20 @@ impl GuestReconciler {
|
||||
|
||||
if changed || initial {
|
||||
self.guests.update(uuid, stored_guest).await?;
|
||||
if let Err(error) = self.reconcile(uuid).await {
|
||||
error!("failed to reconcile guest {}: {}", uuid, error);
|
||||
}
|
||||
let _ = self.guest_reconciler_notify.try_send(uuid);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn reconcile(&self, uuid: Uuid) -> Result<()> {
|
||||
pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
|
||||
let _runtime_reconcile_permit = self.reconcile_lock.read().await;
|
||||
let Some(mut guest) = self.guests.read(uuid).await? else {
|
||||
warn!(
|
||||
"notified of reconcile for guest {} but it didn't exist",
|
||||
uuid
|
||||
);
|
||||
return Ok(());
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
info!("reconciling guest {}", uuid);
|
||||
@ -120,47 +175,55 @@ impl GuestReconciler {
|
||||
guest: Some(guest.clone()),
|
||||
}))?;
|
||||
|
||||
let result = match guest.state.as_ref().map(|x| x.status()).unwrap_or_default() {
|
||||
let start_status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||
let result = match start_status {
|
||||
GuestStatus::Starting => self.start(uuid, &mut guest).await,
|
||||
GuestStatus::Destroying | GuestStatus::Exited => self.destroy(uuid, &mut guest).await,
|
||||
_ => Ok(false),
|
||||
GuestStatus::Exited => self.exited(&mut guest).await,
|
||||
GuestStatus::Destroying => self.destroy(uuid, &mut guest).await,
|
||||
_ => Ok(GuestReconcilerResult::Unchanged),
|
||||
};
|
||||
|
||||
let changed = match result {
|
||||
Ok(changed) => changed,
|
||||
let result = match result {
|
||||
Ok(result) => result,
|
||||
Err(error) => {
|
||||
guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
|
||||
guest.state.as_mut().unwrap().status = GuestStatus::Failed.into();
|
||||
guest.state.as_mut().unwrap().error_info = Some(GuestErrorInfo {
|
||||
message: error.to_string(),
|
||||
});
|
||||
true
|
||||
warn!("failed to start guest {}: {}", guest.id, error);
|
||||
GuestReconcilerResult::Changed { rerun: false }
|
||||
}
|
||||
};
|
||||
|
||||
info!("reconciled guest {}", uuid);
|
||||
|
||||
let status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||
let destroyed = status == GuestStatus::Destroyed || status == GuestStatus::Failed;
|
||||
let destroyed = status == GuestStatus::Destroyed;
|
||||
|
||||
if changed {
|
||||
let rerun = if let GuestReconcilerResult::Changed { rerun } = result {
|
||||
let event = DaemonEvent::GuestChanged(GuestChangedEvent {
|
||||
guest: Some(guest.clone()),
|
||||
});
|
||||
|
||||
if destroyed {
|
||||
self.guests.remove(uuid).await?;
|
||||
let mut map = self.tasks.lock().await;
|
||||
map.remove(&uuid);
|
||||
} else {
|
||||
self.guests.update(uuid, guest.clone()).await?;
|
||||
}
|
||||
|
||||
self.events.send(event)?;
|
||||
}
|
||||
rerun
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
Ok(())
|
||||
Ok(rerun)
|
||||
}
|
||||
|
||||
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<bool> {
|
||||
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
||||
let Some(ref spec) = guest.spec else {
|
||||
return Err(anyhow!("guest spec not specified"));
|
||||
};
|
||||
@ -174,19 +237,38 @@ impl GuestReconciler {
|
||||
return Err(anyhow!("oci spec not specified"));
|
||||
}
|
||||
};
|
||||
|
||||
let task = spec.task.as_ref().cloned().unwrap_or_default();
|
||||
|
||||
let image = self
|
||||
.packer
|
||||
.recall(
|
||||
&oci.digest,
|
||||
match oci.format() {
|
||||
GuestOciImageFormat::Unknown => OciPackedFormat::Squashfs,
|
||||
GuestOciImageFormat::Squashfs => OciPackedFormat::Squashfs,
|
||||
GuestOciImageFormat::Erofs => OciPackedFormat::Erofs,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
let Some(image) = image else {
|
||||
return Err(anyhow!(
|
||||
"image {} in the requested format did not exist",
|
||||
oci.digest
|
||||
));
|
||||
};
|
||||
|
||||
let info = self
|
||||
.runtime
|
||||
.launch(GuestLaunchRequest {
|
||||
format: LaunchPackedFormat::Squashfs,
|
||||
uuid: Some(uuid),
|
||||
name: if spec.name.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(&spec.name)
|
||||
Some(spec.name.clone())
|
||||
},
|
||||
image: &oci.image,
|
||||
image,
|
||||
vcpus: spec.vcpus,
|
||||
mem: spec.mem,
|
||||
env: task
|
||||
@ -206,10 +288,19 @@ impl GuestReconciler {
|
||||
error_info: None,
|
||||
domid: info.domid,
|
||||
});
|
||||
Ok(true)
|
||||
Ok(GuestReconcilerResult::Changed { rerun: false })
|
||||
}
|
||||
|
||||
async fn destroy(&self, uuid: Uuid, guest: &mut Guest) -> Result<bool> {
|
||||
async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
||||
if let Some(ref mut state) = guest.state {
|
||||
state.set_status(GuestStatus::Destroying);
|
||||
Ok(GuestReconcilerResult::Changed { rerun: true })
|
||||
} else {
|
||||
Ok(GuestReconcilerResult::Unchanged)
|
||||
}
|
||||
}
|
||||
|
||||
async fn destroy(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
||||
if let Err(error) = self.runtime.destroy(uuid).await {
|
||||
trace!("failed to destroy runtime guest {}: {}", uuid, error);
|
||||
}
|
||||
@ -222,7 +313,46 @@ impl GuestReconciler {
|
||||
error_info: None,
|
||||
domid: guest.state.as_ref().map(|x| x.domid).unwrap_or(u32::MAX),
|
||||
});
|
||||
Ok(true)
|
||||
Ok(GuestReconcilerResult::Changed { rerun: false })
|
||||
}
|
||||
|
||||
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
|
||||
let mut map = self.tasks.lock().await;
|
||||
match map.entry(uuid) {
|
||||
Entry::Occupied(_) => {}
|
||||
Entry::Vacant(entry) => {
|
||||
entry.insert(self.launch_task(uuid).await?);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn launch_task(&self, uuid: Uuid) -> Result<GuestReconcilerEntry> {
|
||||
let this = self.clone();
|
||||
let (sender, mut receiver) = channel(10);
|
||||
let task = tokio::task::spawn(async move {
|
||||
'notify_loop: loop {
|
||||
if receiver.recv().await.is_none() {
|
||||
break 'notify_loop;
|
||||
}
|
||||
|
||||
'rerun_loop: loop {
|
||||
let rerun = match this.reconcile(uuid).await {
|
||||
Ok(rerun) => rerun,
|
||||
Err(error) => {
|
||||
error!("failed to reconcile guest {}: {}", uuid, error);
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if rerun {
|
||||
continue 'rerun_loop;
|
||||
}
|
||||
break 'rerun_loop;
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(GuestReconcilerEntry { task, sender })
|
||||
}
|
||||
}
@@ -2,7 +2,7 @@
name = "krata-guest"
description = "Guest services for the krata hypervisor."
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
@@ -10,11 +10,12 @@ resolver = "2"

[dependencies]
anyhow = { workspace = true }
cgroups-rs = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.3" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.3" }
krata = { path = "../krata", version = "^0.0.9" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }
libc = { workspace = true }
log = { workspace = true }
nix = { workspace = true, features = ["ioctl", "process", "fs"] }
@@ -24,8 +25,8 @@ rtnetlink = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sys-mount = { workspace = true }
sysinfo = { workspace = true }
tokio = { workspace = true }
walkdir = { workspace = true }

[lib]
name = "krataguest"

@@ -23,6 +23,8 @@ async fn main() -> Result<()> {
    if let Err(error) = guest.init().await {
        error!("failed to initialize guest: {}", error);
        death(127).await?;
        return Ok(());
    }
    death(1).await?;
    Ok(())
}
|
@ -1,42 +1,70 @@
|
||||
use crate::{
|
||||
childwait::{ChildEvent, ChildWait},
|
||||
death,
|
||||
metrics::MetricsCollector,
|
||||
};
|
||||
use anyhow::Result;
|
||||
use cgroups_rs::Cgroup;
|
||||
use krata::idm::{
|
||||
client::IdmClient,
|
||||
protocol::{idm_event::Event, IdmEvent, IdmExitEvent, IdmPacket},
|
||||
protocol::{
|
||||
idm_event::Event, idm_request::Request, idm_response::Response, IdmEvent, IdmExitEvent,
|
||||
IdmMetricsResponse, IdmPingResponse, IdmRequest,
|
||||
},
|
||||
};
|
||||
use log::error;
|
||||
use log::debug;
|
||||
use nix::unistd::Pid;
|
||||
use tokio::select;
|
||||
use tokio::{select, sync::broadcast};
|
||||
|
||||
pub struct GuestBackground {
|
||||
idm: IdmClient,
|
||||
child: Pid,
|
||||
_cgroup: Cgroup,
|
||||
wait: ChildWait,
|
||||
}
|
||||
|
||||
impl GuestBackground {
|
||||
pub async fn new(idm: IdmClient, child: Pid) -> Result<GuestBackground> {
|
||||
pub async fn new(idm: IdmClient, cgroup: Cgroup, child: Pid) -> Result<GuestBackground> {
|
||||
Ok(GuestBackground {
|
||||
idm,
|
||||
child,
|
||||
_cgroup: cgroup,
|
||||
wait: ChildWait::new()?,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
let mut event_subscription = self.idm.subscribe().await?;
|
||||
let mut requests_subscription = self.idm.requests().await?;
|
||||
loop {
|
||||
select! {
|
||||
x = self.idm.receiver.recv() => match x {
|
||||
Some(_packet) => {
|
||||
x = event_subscription.recv() => match x {
|
||||
Ok(_event) => {
|
||||
|
||||
},
|
||||
|
||||
None => {
|
||||
error!("idm packet channel closed");
|
||||
Err(broadcast::error::RecvError::Closed) => {
|
||||
debug!("idm packet channel closed");
|
||||
break;
|
||||
},
|
||||
|
||||
_ => {
|
||||
continue;
|
||||
}
|
||||
},
|
||||
|
||||
x = requests_subscription.recv() => match x {
|
||||
Ok(request) => {
|
||||
self.handle_idm_request(request).await?;
|
||||
},
|
||||
|
||||
Err(broadcast::error::RecvError::Closed) => {
|
||||
debug!("idm packet channel closed");
|
||||
break;
|
||||
},
|
||||
|
||||
_ => {
|
||||
continue;
|
||||
}
|
||||
},
|
||||
|
||||
@ -51,14 +79,34 @@ impl GuestBackground {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_idm_request(&mut self, packet: IdmRequest) -> Result<()> {
|
||||
let id = packet.id;
|
||||
|
||||
match packet.request {
|
||||
Some(Request::Ping(_)) => {
|
||||
self.idm
|
||||
.respond(id, Response::Ping(IdmPingResponse {}))
|
||||
.await?;
|
||||
}
|
||||
|
||||
Some(Request::Metrics(_)) => {
|
||||
let metrics = MetricsCollector::new()?;
|
||||
let root = metrics.collect()?;
|
||||
let response = IdmMetricsResponse { root: Some(root) };
|
||||
|
||||
self.idm.respond(id, Response::Metrics(response)).await?;
|
||||
}
|
||||
|
||||
None => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn child_event(&mut self, event: ChildEvent) -> Result<()> {
|
||||
if event.pid == self.child {
|
||||
self.idm
|
||||
.sender
|
||||
.send(IdmPacket {
|
||||
event: Some(IdmEvent {
|
||||
event: Some(Event::Exit(IdmExitEvent { code: event.status })),
|
||||
}),
|
||||
.emit(IdmEvent {
|
||||
event: Some(Event::Exit(IdmExitEvent { code: event.status })),
|
||||
})
|
||||
.await?;
|
||||
death(event.status).await?;
|
||||
|
@ -1,10 +1,11 @@
|
||||
use anyhow::{anyhow, Result};
|
||||
use cgroups_rs::{Cgroup, CgroupPid};
|
||||
use futures::stream::TryStreamExt;
|
||||
use ipnetwork::IpNetwork;
|
||||
use krata::ethtool::EthtoolHandle;
|
||||
use krata::idm::client::IdmClient;
|
||||
use krata::launchcfg::{LaunchInfo, LaunchNetwork};
|
||||
use libc::{setsid, TIOCSCTTY};
|
||||
use krata::launchcfg::{LaunchInfo, LaunchNetwork, LaunchPackedFormat};
|
||||
use libc::{sethostname, setsid, TIOCSCTTY};
|
||||
use log::{trace, warn};
|
||||
use nix::ioctl_write_int_bad;
|
||||
use nix::unistd::{dup2, execve, fork, ForkResult, Pid};
|
||||
@ -16,14 +17,12 @@ use std::fs::{File, OpenOptions, Permissions};
|
||||
use std::io;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
use std::os::fd::AsRawFd;
|
||||
use std::os::linux::fs::MetadataExt;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::os::unix::fs::{chroot, symlink, PermissionsExt};
|
||||
use std::os::unix::fs::{chroot, PermissionsExt};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
use sys_mount::{FilesystemType, Mount, MountFlags};
|
||||
use tokio::fs;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
use crate::background::GuestBackground;
|
||||
|
||||
@ -81,18 +80,39 @@ impl GuestInit {
|
||||
let idm = IdmClient::open("/dev/hvc1")
|
||||
.await
|
||||
.map_err(|x| anyhow!("failed to open idm client: {}", x))?;
|
||||
self.mount_squashfs_images().await?;
|
||||
self.mount_config_image().await?;
|
||||
|
||||
let config = self.parse_image_config().await?;
|
||||
let launch = self.parse_launch_config().await?;
|
||||
|
||||
self.mount_root_image(launch.root.format.clone()).await?;
|
||||
|
||||
self.mount_new_root().await?;
|
||||
self.nuke_initrd().await?;
|
||||
self.bind_new_root().await?;
|
||||
|
||||
if let Some(hostname) = launch.hostname.clone() {
|
||||
let result = unsafe {
|
||||
sethostname(
|
||||
hostname.as_bytes().as_ptr() as *mut libc::c_char,
|
||||
hostname.len(),
|
||||
)
|
||||
};
|
||||
if result != 0 {
|
||||
warn!("failed to set hostname: {}", result);
|
||||
}
|
||||
|
||||
let etc = PathBuf::from_str("/etc")?;
|
||||
if !etc.exists() {
|
||||
fs::create_dir(&etc).await?;
|
||||
}
|
||||
let mut etc_hostname = etc;
|
||||
etc_hostname.push("hostname");
|
||||
fs::write(&etc_hostname, hostname + "\n").await?;
|
||||
}
|
||||
|
||||
if let Some(network) = &launch.network {
|
||||
trace!("initializing network");
|
||||
if let Err(error) = self.network_setup(network).await {
|
||||
if let Err(error) = self.network_setup(&launch, network).await {
|
||||
warn!("failed to initialize network: {}", error);
|
||||
}
|
||||
}
|
||||
@ -112,14 +132,20 @@ impl GuestInit {
|
||||
trace!("early init");
|
||||
self.create_dir("/dev", Some(0o0755)).await?;
|
||||
self.create_dir("/proc", None).await?;
|
||||
self.create_dir("/sys", None).await?;
|
||||
self.create_dir("/sys", Some(0o0555)).await?;
|
||||
self.create_dir("/root", Some(0o0700)).await?;
|
||||
self.create_dir("/tmp", None).await?;
|
||||
self.mount_kernel_fs("devtmpfs", "/dev", "mode=0755")
|
||||
self.create_dir("/run", Some(0o0755)).await?;
|
||||
self.mount_kernel_fs("devtmpfs", "/dev", "mode=0755", None)
|
||||
.await?;
|
||||
self.mount_kernel_fs("proc", "/proc", "", None).await?;
|
||||
self.mount_kernel_fs("sysfs", "/sys", "", None).await?;
|
||||
fs::symlink("/proc/self/fd", "/dev/fd").await?;
|
||||
fs::symlink("/proc/self/fd/0", "/dev/stdin").await?;
|
||||
fs::symlink("/proc/self/fd/1", "/dev/stdout").await?;
|
||||
fs::symlink("/proc/self/fd/2", "/dev/stderr").await?;
|
||||
self.mount_kernel_fs("cgroup2", "/sys/fs/cgroup", "", Some(MountFlags::RELATIME))
|
||||
.await?;
|
||||
self.mount_kernel_fs("proc", "/proc", "").await?;
|
||||
self.mount_kernel_fs("sysfs", "/sys", "").await?;
|
||||
symlink("/proc/self/fd", "/dev/fd")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -137,16 +163,19 @@ impl GuestInit {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn mount_kernel_fs(&mut self, fstype: &str, path: &str, data: &str) -> Result<()> {
|
||||
let metadata = fs::metadata(path).await?;
|
||||
if metadata.st_dev() == fs::metadata("/").await?.st_dev() {
|
||||
trace!("mounting kernel fs {} to {}", fstype, path);
|
||||
Mount::builder()
|
||||
.fstype(FilesystemType::Manual(fstype))
|
||||
.flags(MountFlags::NOEXEC | MountFlags::NOSUID)
|
||||
.data(data)
|
||||
.mount(fstype, path)?;
|
||||
}
|
||||
async fn mount_kernel_fs(
|
||||
&mut self,
|
||||
fstype: &str,
|
||||
path: &str,
|
||||
data: &str,
|
||||
flags: Option<MountFlags>,
|
||||
) -> Result<()> {
|
||||
trace!("mounting kernel fs {} to {}", fstype, path);
|
||||
Mount::builder()
|
||||
.fstype(FilesystemType::Manual(fstype))
|
||||
.flags(MountFlags::NOEXEC | MountFlags::NOSUID | flags.unwrap_or(MountFlags::empty()))
|
||||
.data(data)
|
||||
.mount(fstype, path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -158,24 +187,41 @@ impl GuestInit {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn mount_squashfs_images(&mut self) -> Result<()> {
|
||||
trace!("mounting squashfs images");
|
||||
let image_mount_path = Path::new(IMAGE_MOUNT_PATH);
|
||||
async fn mount_config_image(&mut self) -> Result<()> {
|
||||
trace!("mounting config image");
|
||||
let config_mount_path = Path::new(CONFIG_MOUNT_PATH);
|
||||
self.mount_squashfs(Path::new(IMAGE_BLOCK_DEVICE_PATH), image_mount_path)
|
||||
.await?;
|
||||
self.mount_squashfs(Path::new(CONFIG_BLOCK_DEVICE_PATH), config_mount_path)
|
||||
self.mount_image(
|
||||
Path::new(CONFIG_BLOCK_DEVICE_PATH),
|
||||
config_mount_path,
|
||||
LaunchPackedFormat::Squashfs,
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn mount_root_image(&mut self, format: LaunchPackedFormat) -> Result<()> {
|
||||
trace!("mounting root image");
|
||||
let image_mount_path = Path::new(IMAGE_MOUNT_PATH);
|
||||
self.mount_image(Path::new(IMAGE_BLOCK_DEVICE_PATH), image_mount_path, format)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn mount_squashfs(&mut self, from: &Path, to: &Path) -> Result<()> {
|
||||
trace!("mounting squashfs image {:?} to {:?}", from, to);
|
||||
async fn mount_image(
|
||||
&mut self,
|
||||
from: &Path,
|
||||
to: &Path,
|
||||
format: LaunchPackedFormat,
|
||||
) -> Result<()> {
|
||||
trace!("mounting {:?} image {:?} to {:?}", format, from, to);
|
||||
if !to.is_dir() {
|
||||
fs::create_dir(to).await?;
|
||||
}
|
||||
Mount::builder()
|
||||
.fstype(FilesystemType::Manual("squashfs"))
|
||||
.fstype(FilesystemType::Manual(match format {
|
||||
LaunchPackedFormat::Squashfs => "squashfs",
|
||||
LaunchPackedFormat::Erofs => "erofs",
|
||||
}))
|
||||
.flags(MountFlags::RDONLY)
|
||||
.mount(from, to)?;
|
||||
Ok(())
|
||||
@ -249,40 +295,6 @@ impl GuestInit {
|
||||
Ok(serde_json::from_str(&content)?)
|
||||
}
|
||||
|
||||
async fn nuke_initrd(&mut self) -> Result<()> {
|
||||
trace!("nuking initrd");
|
||||
let initrd_dev = fs::metadata("/").await?.st_dev();
|
||||
for item in WalkDir::new("/")
|
||||
.same_file_system(true)
|
||||
.follow_links(false)
|
||||
.contents_first(true)
|
||||
{
|
||||
if item.is_err() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let item = item?;
|
||||
let metadata = match item.metadata() {
|
||||
Ok(value) => value,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
if metadata.st_dev() != initrd_dev {
|
||||
continue;
|
||||
}
|
||||
|
||||
if metadata.is_symlink() || metadata.is_file() {
|
||||
let _ = fs::remove_file(item.path()).await;
|
||||
trace!("deleting file {:?}", item.path());
|
||||
} else if metadata.is_dir() {
|
||||
let _ = fs::remove_dir(item.path()).await;
|
||||
trace!("deleting directory {:?}", item.path());
|
||||
}
|
||||
}
|
||||
trace!("nuked initrd");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn bind_new_root(&mut self) -> Result<()> {
|
||||
self.mount_move_subtree(Path::new(SYS_PATH), Path::new(NEW_ROOT_SYS_PATH))
|
||||
.await?;
|
||||
@ -302,7 +314,7 @@ impl GuestInit {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn network_setup(&mut self, network: &LaunchNetwork) -> Result<()> {
|
||||
async fn network_setup(&mut self, cfg: &LaunchInfo, network: &LaunchNetwork) -> Result<()> {
|
||||
trace!("setting up network for link");
|
||||
|
||||
let etc = PathBuf::from_str("/etc")?;
|
||||
@ -310,14 +322,33 @@ impl GuestInit {
|
||||
fs::create_dir(etc).await?;
|
||||
}
|
||||
let resolv = PathBuf::from_str("/etc/resolv.conf")?;
|
||||
let mut lines = vec!["# krata resolver configuration".to_string()];
|
||||
for nameserver in &network.resolver.nameservers {
|
||||
lines.push(format!("nameserver {}", nameserver));
|
||||
|
||||
{
|
||||
let mut lines = vec!["# krata resolver configuration".to_string()];
|
||||
for nameserver in &network.resolver.nameservers {
|
||||
lines.push(format!("nameserver {}", nameserver));
|
||||
}
|
||||
|
||||
let mut conf = lines.join("\n");
|
||||
conf.push('\n');
|
||||
fs::write(resolv, conf).await?;
|
||||
}
|
||||
|
||||
let hosts = PathBuf::from_str("/etc/hosts")?;
|
||||
if let Some(ref hostname) = cfg.hostname {
|
||||
let mut lines = if hosts.exists() {
|
||||
fs::read_to_string(&hosts)
|
||||
.await?
|
||||
.lines()
|
||||
.map(|x| x.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
} else {
|
||||
vec!["127.0.0.1 localhost".to_string()]
|
||||
};
|
||||
lines.push(format!("127.0.1.1 {}", hostname));
|
||||
fs::write(&hosts, lines.join("\n") + "\n").await?;
|
||||
}
|
||||
|
||||
let mut conf = lines.join("\n");
|
||||
conf.push('\n');
|
||||
fs::write(resolv, conf).await?;
|
||||
self.network_configure_ethtool(network).await?;
|
||||
self.network_configure_link(network).await?;
|
||||
Ok(())
|
||||
@ -327,6 +358,14 @@ impl GuestInit {
|
||||
let (connection, handle, _) = rtnetlink::new_connection()?;
|
||||
tokio::spawn(connection);
|
||||
|
||||
let mut links = handle.link().get().match_name("lo".to_string()).execute();
|
||||
let Some(link) = links.try_next().await? else {
|
||||
warn!("unable to find link named lo");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
handle.link().set(link.header.index).up().execute().await?;
|
||||
|
||||
let ipv4_network: IpNetwork = network.ipv4.address.parse()?;
|
||||
let ipv4_gateway: Ipv4Addr = network.ipv4.gateway.parse()?;
|
||||
let ipv6_network: IpNetwork = network.ipv6.address.parse()?;
|
||||
@ -406,7 +445,7 @@ impl GuestInit {
|
||||
};
|
||||
|
||||
if launch.run.is_some() {
|
||||
cmd = launch.run.as_ref().unwrap().clone();
|
||||
cmd.clone_from(launch.run.as_ref().unwrap());
|
||||
}
|
||||
|
||||
if let Some(entrypoint) = config.entrypoint() {
|
||||
@ -427,7 +466,12 @@ impl GuestInit {
|
||||
}
|
||||
env.extend(launch.env.clone());
|
||||
env.insert("KRATA_CONTAINER".to_string(), "1".to_string());
|
||||
env.insert("TERM".to_string(), "vt100".to_string());
|
||||
|
||||
// If we were not provided a terminal definition in our launch manifest, we
|
||||
// default to xterm as most terminal emulators support the xterm control codes.
|
||||
if !env.contains_key("TERM") {
|
||||
env.insert("TERM".to_string(), "xterm".to_string());
|
||||
}
|
||||
|
||||
let path = GuestInit::resolve_executable(&env, path.into())?;
|
||||
let Some(file_name) = path.file_name() else {
|
||||
@ -454,10 +498,21 @@ impl GuestInit {
|
||||
working_dir = "/".to_string();
|
||||
}
|
||||
|
||||
self.fork_and_exec(idm, working_dir, path, cmd, env).await?;
|
||||
let cgroup = self.init_cgroup().await?;
|
||||
self.fork_and_exec(idm, cgroup, working_dir, path, cmd, env)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn init_cgroup(&self) -> Result<Cgroup> {
|
||||
trace!("initializing cgroup");
|
||||
let hierarchy = cgroups_rs::hierarchies::auto();
|
||||
let cgroup = Cgroup::new(hierarchy, "krata-guest-task")?;
|
||||
cgroup.set_cgroup_type("threaded")?;
|
||||
trace!("initialized cgroup");
|
||||
Ok(cgroup)
|
||||
}
|
||||
|
||||
fn strings_as_cstrings(values: Vec<String>) -> Result<Vec<CString>> {
|
||||
let mut results: Vec<CString> = vec![];
|
||||
for value in values {
|
||||
@ -506,19 +561,21 @@ impl GuestInit {
|
||||
async fn fork_and_exec(
|
||||
&mut self,
|
||||
idm: IdmClient,
|
||||
cgroup: Cgroup,
|
||||
working_dir: String,
|
||||
path: CString,
|
||||
cmd: Vec<CString>,
|
||||
env: Vec<CString>,
|
||||
) -> Result<()> {
|
||||
match unsafe { fork()? } {
|
||||
ForkResult::Parent { child } => self.background(idm, child).await,
|
||||
ForkResult::Child => self.foreground(working_dir, path, cmd, env).await,
|
||||
ForkResult::Parent { child } => self.background(idm, cgroup, child).await,
|
||||
ForkResult::Child => self.foreground(cgroup, working_dir, path, cmd, env).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn foreground(
|
||||
&mut self,
|
||||
cgroup: Cgroup,
|
||||
working_dir: String,
|
||||
path: CString,
|
||||
cmd: Vec<CString>,
|
||||
@ -526,6 +583,7 @@ impl GuestInit {
|
||||
) -> Result<()> {
|
||||
GuestInit::set_controlling_terminal()?;
|
||||
std::env::set_current_dir(working_dir)?;
|
||||
cgroup.add_task(CgroupPid::from(std::process::id() as u64))?;
|
||||
execve(&path, &cmd, &env)?;
|
||||
Ok(())
|
||||
}
|
||||
@ -538,8 +596,8 @@ impl GuestInit {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn background(&mut self, idm: IdmClient, executed: Pid) -> Result<()> {
|
||||
let mut background = GuestBackground::new(idm, executed).await?;
|
||||
async fn background(&mut self, idm: IdmClient, cgroup: Cgroup, executed: Pid) -> Result<()> {
|
||||
let mut background = GuestBackground::new(idm, cgroup, executed).await?;
|
||||
background.run().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ use xenstore::{XsdClient, XsdInterface};
|
||||
pub mod background;
|
||||
pub mod childwait;
|
||||
pub mod init;
|
||||
pub mod metrics;
|
||||
|
||||
pub async fn death(code: c_int) -> Result<()> {
|
||||
let store = XsdClient::open().await?;
|
||||
|
crates/guest/src/metrics.rs (new file, 121 lines)
@@ -0,0 +1,121 @@
use std::{ops::Add, path::Path};
|
||||
|
||||
use anyhow::Result;
|
||||
use krata::idm::protocol::{IdmMetricFormat, IdmMetricNode};
|
||||
use sysinfo::Process;
|
||||
|
||||
pub struct MetricsCollector {}
|
||||
|
||||
impl MetricsCollector {
|
||||
pub fn new() -> Result<Self> {
|
||||
Ok(MetricsCollector {})
|
||||
}
|
||||
|
||||
pub fn collect(&self) -> Result<IdmMetricNode> {
|
||||
let mut sysinfo = sysinfo::System::new();
|
||||
Ok(IdmMetricNode::structural(
|
||||
"guest",
|
||||
vec![
|
||||
self.collect_system(&mut sysinfo)?,
|
||||
self.collect_processes(&mut sysinfo)?,
|
||||
],
|
||||
))
|
||||
}
|
||||
|
||||
fn collect_system(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
|
||||
sysinfo.refresh_memory();
|
||||
Ok(IdmMetricNode::structural(
|
||||
"system",
|
||||
vec![IdmMetricNode::structural(
|
||||
"memory",
|
||||
vec![
|
||||
IdmMetricNode::value("total", sysinfo.total_memory(), IdmMetricFormat::Bytes),
|
||||
IdmMetricNode::value("used", sysinfo.used_memory(), IdmMetricFormat::Bytes),
|
||||
IdmMetricNode::value("free", sysinfo.free_memory(), IdmMetricFormat::Bytes),
|
||||
],
|
||||
)],
|
||||
))
|
||||
}
|
||||
|
||||
fn collect_processes(&self, sysinfo: &mut sysinfo::System) -> Result<IdmMetricNode> {
|
||||
sysinfo.refresh_processes();
|
||||
let mut processes = Vec::new();
|
||||
let mut sysinfo_processes = sysinfo.processes().values().collect::<Vec<_>>();
|
||||
sysinfo_processes.sort_by_key(|x| x.pid());
|
||||
for process in sysinfo_processes {
|
||||
if process.thread_kind().is_some() {
|
||||
continue;
|
||||
}
|
||||
processes.push(MetricsCollector::process_node(process)?);
|
||||
}
|
||||
Ok(IdmMetricNode::structural("process", processes))
|
||||
}
|
||||
|
||||
fn process_node(process: &Process) -> Result<IdmMetricNode> {
|
||||
let mut metrics = vec![];
|
||||
|
||||
if let Some(parent) = process.parent() {
|
||||
metrics.push(IdmMetricNode::value(
|
||||
"parent",
|
||||
parent.as_u32() as u64,
|
||||
IdmMetricFormat::Integer,
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(exe) = process.exe().and_then(path_as_str) {
|
||||
metrics.push(IdmMetricNode::raw_value("executable", exe));
|
||||
}
|
||||
|
||||
if let Some(working_directory) = process.cwd().and_then(path_as_str) {
|
||||
metrics.push(IdmMetricNode::raw_value("cwd", working_directory));
|
||||
}
|
||||
|
||||
let cmdline = process.cmd().to_vec();
|
||||
metrics.push(IdmMetricNode::raw_value("cmdline", cmdline));
|
||||
metrics.push(IdmMetricNode::structural(
|
||||
"memory",
|
||||
vec![
|
||||
IdmMetricNode::value("resident", process.memory(), IdmMetricFormat::Bytes),
|
||||
IdmMetricNode::value("virtual", process.virtual_memory(), IdmMetricFormat::Bytes),
|
||||
],
|
||||
));
|
||||
|
||||
metrics.push(IdmMetricNode::value(
|
||||
"lifetime",
|
||||
process.run_time(),
|
||||
IdmMetricFormat::DurationSeconds,
|
||||
));
|
||||
metrics.push(IdmMetricNode::value(
|
||||
"uid",
|
||||
process.user_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
|
||||
IdmMetricFormat::Integer,
|
||||
));
|
||||
metrics.push(IdmMetricNode::value(
|
||||
"gid",
|
||||
process.group_id().map(|x| (*x).add(0)).unwrap_or(0) as f64,
|
||||
IdmMetricFormat::Integer,
|
||||
));
|
||||
metrics.push(IdmMetricNode::value(
|
||||
"euid",
|
||||
process
|
||||
.effective_user_id()
|
||||
.map(|x| (*x).add(0))
|
||||
.unwrap_or(0) as f64,
|
||||
IdmMetricFormat::Integer,
|
||||
));
|
||||
metrics.push(IdmMetricNode::value(
|
||||
"egid",
|
||||
process.effective_group_id().map(|x| x.add(0)).unwrap_or(0) as f64,
|
||||
IdmMetricFormat::Integer,
|
||||
));
|
||||
|
||||
Ok(IdmMetricNode::structural(
|
||||
process.pid().to_string(),
|
||||
metrics,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn path_as_str(path: &Path) -> Option<String> {
|
||||
String::from_utf8(path.as_os_str().as_encoded_bytes().to_vec()).ok()
|
||||
}
|
@@ -2,7 +2,7 @@
name = "krata"
description = "Client library and common services for the krata hypervisor."
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
@@ -10,12 +10,15 @@ resolver = "2"

[dependencies]
anyhow = { workspace = true }
async-trait = { workspace = true }
bytes = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
once_cell = { workspace = true }
prost = { workspace = true }
prost-reflect = { workspace = true }
prost-types = { workspace = true }
scopeguard = { workspace = true }
serde = { workspace = true }
tonic = { workspace = true }
tokio = { workspace = true }

@@ -6,18 +6,12 @@ fn main() -> Result<()> {
        .descriptor_pool("crate::DESCRIPTOR_POOL")
        .configure(
            &mut config,
            &[
                "proto/krata/v1/control.proto",
                "proto/krata/internal/idm.proto",
            ],
            &["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
            &["proto/"],
        )?;
    tonic_build::configure().compile_with_config(
        config,
        &[
            "proto/krata/v1/control.proto",
            "proto/krata/internal/idm.proto",
        ],
        &["proto/krata/v1/control.proto", "proto/krata/bus/idm.proto"],
        &["proto/"],
    )?;
    Ok(())
crates/krata/proto/krata/bus/idm.proto (new file, 67 lines)
@@ -0,0 +1,67 @@
syntax = "proto3";

package krata.bus.idm;

option java_multiple_files = true;
option java_package = "dev.krata.proto.bus.idm";
option java_outer_classname = "IdmProto";

import "google/protobuf/struct.proto";

message IdmPacket {
    oneof content {
        IdmEvent event = 1;
        IdmRequest request = 2;
        IdmResponse response = 3;
    }
}

message IdmEvent {
    oneof event {
        IdmExitEvent exit = 1;
    }
}

message IdmExitEvent {
    int32 code = 1;
}

message IdmRequest {
    uint64 id = 1;
    oneof request {
        IdmPingRequest ping = 2;
        IdmMetricsRequest metrics = 3;
    }
}

message IdmPingRequest {}

message IdmMetricsRequest {}

message IdmResponse {
    uint64 id = 1;
    oneof response {
        IdmPingResponse ping = 2;
        IdmMetricsResponse metrics = 3;
    }
}

message IdmPingResponse {}

message IdmMetricsResponse {
    IdmMetricNode root = 1;
}

message IdmMetricNode {
    string name = 1;
    google.protobuf.Value value = 2;
    IdmMetricFormat format = 3;
    repeated IdmMetricNode children = 4;
}

enum IdmMetricFormat {
    IDM_METRIC_FORMAT_UNKNOWN = 0;
    IDM_METRIC_FORMAT_BYTES = 1;
    IDM_METRIC_FORMAT_INTEGER = 2;
    IDM_METRIC_FORMAT_DURATION_SECONDS = 3;
}
@@ -1,21 +0,0 @@
syntax = "proto3";

package krata.internal.idm;

option java_multiple_files = true;
option java_package = "dev.krata.proto.internal.idm";
option java_outer_classname = "IdmProto";

message IdmExitEvent {
    int32 code = 1;
}

message IdmEvent {
    oneof event {
        IdmExitEvent exit = 1;
    }
}

message IdmPacket {
    IdmEvent event = 1;
}
@@ -6,6 +6,8 @@ option java_multiple_files = true;
option java_package = "dev.krata.proto.v1.common";
option java_outer_classname = "CommonProto";

import "google/protobuf/struct.proto";

message Guest {
    string id = 1;
    GuestSpec spec = 2;
@@ -27,8 +29,15 @@ message GuestImageSpec {
    }
}

enum GuestOciImageFormat {
    GUEST_OCI_IMAGE_FORMAT_UNKNOWN = 0;
    GUEST_OCI_IMAGE_FORMAT_SQUASHFS = 1;
    GUEST_OCI_IMAGE_FORMAT_EROFS = 2;
}

message GuestOciImageSpec {
    string image = 1;
    string digest = 1;
    GuestOciImageFormat format = 2;
}

message GuestTaskSpec {
@@ -80,3 +89,17 @@ message GuestExitInfo {
message GuestErrorInfo {
    string message = 1;
}

message GuestMetricNode {
    string name = 1;
    google.protobuf.Value value = 2;
    GuestMetricFormat format = 3;
    repeated GuestMetricNode children = 4;
}

enum GuestMetricFormat {
    GUEST_METRIC_FORMAT_UNKNOWN = 0;
    GUEST_METRIC_FORMAT_BYTES = 1;
    GUEST_METRIC_FORMAT_INTEGER = 2;
    GUEST_METRIC_FORMAT_DURATION_SECONDS = 3;
}

@ -6,6 +6,7 @@ option java_multiple_files = true;
|
||||
option java_package = "dev.krata.proto.v1.control";
|
||||
option java_outer_classname = "ControlProto";
|
||||
|
||||
import "krata/bus/idm.proto";
|
||||
import "krata/v1/common.proto";
|
||||
|
||||
service ControlService {
|
||||
@ -13,8 +14,14 @@ service ControlService {
|
||||
rpc DestroyGuest(DestroyGuestRequest) returns (DestroyGuestReply);
|
||||
rpc ResolveGuest(ResolveGuestRequest) returns (ResolveGuestReply);
|
||||
rpc ListGuests(ListGuestsRequest) returns (ListGuestsReply);
|
||||
|
||||
rpc ConsoleData(stream ConsoleDataRequest) returns (stream ConsoleDataReply);
|
||||
rpc ReadGuestMetrics(ReadGuestMetricsRequest) returns (ReadGuestMetricsReply);
|
||||
|
||||
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
|
||||
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
|
||||
|
||||
rpc PullImage(PullImageRequest) returns (stream PullImageReply);
|
||||
}
|
||||
|
||||
message CreateGuestRequest {
|
||||
@ -65,3 +72,63 @@ message WatchEventsReply {
|
||||
message GuestChangedEvent {
|
||||
krata.v1.common.Guest guest = 1;
|
||||
}
|
||||
|
||||
message ReadGuestMetricsRequest {
|
||||
string guest_id = 1;
|
||||
}
|
||||
|
||||
message ReadGuestMetricsReply {
|
||||
krata.v1.common.GuestMetricNode root = 1;
|
||||
}
|
||||
|
||||
message SnoopIdmRequest {}
|
||||
|
||||
message SnoopIdmReply {
|
||||
uint32 from = 1;
|
||||
uint32 to = 2;
|
||||
krata.bus.idm.IdmPacket packet = 3;
|
||||
}
|
||||
|
||||
enum PullImageProgressLayerPhase {
|
||||
PULL_IMAGE_PROGRESS_LAYER_PHASE_UNKNOWN = 0;
|
||||
PULL_IMAGE_PROGRESS_LAYER_PHASE_WAITING = 1;
|
||||
PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADING = 2;
|
||||
PULL_IMAGE_PROGRESS_LAYER_PHASE_DOWNLOADED = 3;
|
||||
PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTING = 4;
|
||||
PULL_IMAGE_PROGRESS_LAYER_PHASE_EXTRACTED = 5;
|
||||
}
|
||||
|
||||
message PullImageProgressLayer {
|
||||
string id = 1;
|
||||
PullImageProgressLayerPhase phase = 2;
|
||||
uint64 value = 3;
|
||||
uint64 total = 4;
|
||||
}
|
||||
|
||||
enum PullImageProgressPhase {
|
||||
PULL_IMAGE_PROGRESS_PHASE_UNKNOWN = 0;
|
||||
PULL_IMAGE_PROGRESS_PHASE_RESOLVING = 1;
|
||||
PULL_IMAGE_PROGRESS_PHASE_RESOLVED = 2;
|
||||
PULL_IMAGE_PROGRESS_PHASE_CONFIG_ACQUIRE = 3;
|
||||
PULL_IMAGE_PROGRESS_PHASE_LAYER_ACQUIRE = 4;
|
||||
PULL_IMAGE_PROGRESS_PHASE_PACKING = 5;
|
||||
PULL_IMAGE_PROGRESS_PHASE_COMPLETE = 6;
|
||||
}
|
||||
|
||||
message PullImageProgress {
|
||||
PullImageProgressPhase phase = 1;
|
||||
repeated PullImageProgressLayer layers = 2;
|
||||
uint64 value = 3;
|
||||
uint64 total = 4;
|
||||
}
|
||||
|
||||
message PullImageRequest {
|
||||
string image = 1;
|
||||
krata.v1.common.GuestOciImageFormat format = 2;
|
||||
}
|
||||
|
||||
message PullImageReply {
|
||||
PullImageProgress progress = 1;
|
||||
string digest = 2;
|
||||
krata.v1.common.GuestOciImageFormat format = 3;
|
||||
}
|
||||
|
crates/krata/src/bus/idm.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
use prost_types::{ListValue, Value};
|
||||
|
||||
include!(concat!(env!("OUT_DIR"), "/krata.bus.idm.rs"));
|
||||
|
||||
pub trait AsIdmMetricValue {
|
||||
fn as_metric_value(&self) -> Value;
|
||||
}
|
||||
|
||||
impl IdmMetricNode {
|
||||
pub fn structural<N: AsRef<str>>(name: N, children: Vec<IdmMetricNode>) -> IdmMetricNode {
|
||||
IdmMetricNode {
|
||||
name: name.as_ref().to_string(),
|
||||
value: None,
|
||||
format: IdmMetricFormat::Unknown.into(),
|
||||
children,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn raw_value<N: AsRef<str>, V: AsIdmMetricValue>(name: N, value: V) -> IdmMetricNode {
|
||||
IdmMetricNode {
|
||||
name: name.as_ref().to_string(),
|
||||
value: Some(value.as_metric_value()),
|
||||
format: IdmMetricFormat::Unknown.into(),
|
||||
children: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn value<N: AsRef<str>, V: AsIdmMetricValue>(
|
||||
name: N,
|
||||
value: V,
|
||||
format: IdmMetricFormat,
|
||||
) -> IdmMetricNode {
|
||||
IdmMetricNode {
|
||||
name: name.as_ref().to_string(),
|
||||
value: Some(value.as_metric_value()),
|
||||
format: format.into(),
|
||||
children: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsIdmMetricValue for String {
|
||||
fn as_metric_value(&self) -> Value {
|
||||
Value {
|
||||
kind: Some(prost_types::value::Kind::StringValue(self.to_string())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsIdmMetricValue for &str {
|
||||
fn as_metric_value(&self) -> Value {
|
||||
Value {
|
||||
kind: Some(prost_types::value::Kind::StringValue(self.to_string())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsIdmMetricValue for u64 {
|
||||
fn as_metric_value(&self) -> Value {
|
||||
numeric(*self as f64)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsIdmMetricValue for i64 {
|
||||
fn as_metric_value(&self) -> Value {
|
||||
numeric(*self as f64)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsIdmMetricValue for f64 {
|
||||
fn as_metric_value(&self) -> Value {
|
||||
numeric(*self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsIdmMetricValue> AsIdmMetricValue for Vec<T> {
|
||||
fn as_metric_value(&self) -> Value {
|
||||
let values = self.iter().map(|x| x.as_metric_value()).collect::<_>();
|
||||
Value {
|
||||
kind: Some(prost_types::value::Kind::ListValue(ListValue { values })),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn numeric(value: f64) -> Value {
|
||||
Value {
|
||||
kind: Some(prost_types::value::Kind::NumberValue(value)),
|
||||
}
|
||||
}
|
crates/krata/src/bus/mod.rs (new file, 1 line)
@@ -0,0 +1 @@
pub mod idm;
@ -1,11 +1,14 @@
|
||||
use std::sync::Arc;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use crate::v1::control::{watch_events_reply::Event, WatchEventsReply};
|
||||
use crate::v1::control::{
|
||||
control_service_client::ControlServiceClient, watch_events_reply::Event, WatchEventsReply,
|
||||
WatchEventsRequest,
|
||||
};
|
||||
use anyhow::Result;
|
||||
use log::trace;
|
||||
use tokio::{sync::broadcast, task::JoinHandle};
|
||||
use log::{error, trace, warn};
|
||||
use tokio::{sync::broadcast, task::JoinHandle, time::sleep};
|
||||
use tokio_stream::StreamExt;
|
||||
use tonic::Streaming;
|
||||
use tonic::{transport::Channel, Streaming};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct EventStream {
|
||||
@ -14,27 +17,12 @@ pub struct EventStream {
|
||||
}
|
||||
|
||||
impl EventStream {
|
||||
pub async fn open(mut events: Streaming<WatchEventsReply>) -> Result<Self> {
|
||||
pub async fn open(client: ControlServiceClient<Channel>) -> Result<Self> {
|
||||
let (sender, _) = broadcast::channel(1000);
|
||||
let emit = sender.clone();
|
||||
let task = tokio::task::spawn(async move {
|
||||
loop {
|
||||
let Some(result) = events.next().await else {
|
||||
break;
|
||||
};
|
||||
|
||||
let reply = match result {
|
||||
Ok(reply) => reply,
|
||||
Err(error) => {
|
||||
trace!("event stream processing failed: {}", error);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
let Some(event) = reply.event else {
|
||||
continue;
|
||||
};
|
||||
let _ = emit.send(event);
|
||||
if let Err(error) = EventStream::process(client, emit).await {
|
||||
error!("failed to process event stream: {}", error);
|
||||
}
|
||||
});
|
||||
Ok(Self {
|
||||
@ -43,6 +31,48 @@ impl EventStream {
|
||||
})
|
||||
}
|
||||
|
||||
async fn process(
|
||||
mut client: ControlServiceClient<Channel>,
|
||||
emit: broadcast::Sender<Event>,
|
||||
) -> Result<()> {
|
||||
let mut events: Option<Streaming<WatchEventsReply>> = None;
|
||||
loop {
|
||||
let mut stream = match events {
|
||||
Some(stream) => stream,
|
||||
None => {
|
||||
let result = client.watch_events(WatchEventsRequest {}).await;
|
||||
if let Err(error) = result {
|
||||
warn!("failed to watch events: {}", error);
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
continue;
|
||||
}
|
||||
result.unwrap().into_inner()
|
||||
}
|
||||
};
|
||||
|
||||
let Some(result) = stream.next().await else {
|
||||
events = None;
|
||||
continue;
|
||||
};
|
||||
|
||||
let reply = match result {
|
||||
Ok(reply) => reply,
|
||||
Err(error) => {
|
||||
trace!("event stream processing failed: {}", error);
|
||||
events = None;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let Some(event) = reply.event else {
|
||||
events = Some(stream);
|
||||
continue;
|
||||
};
|
||||
let _ = emit.send(event);
|
||||
events = Some(stream);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn subscribe(&self) -> broadcast::Receiver<Event> {
|
||||
self.sender.subscribe()
|
||||
}
|
||||
|
@ -1,53 +1,58 @@
|
||||
use std::path::Path;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
path::Path,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use super::protocol::IdmPacket;
|
||||
use super::protocol::{
|
||||
idm_packet::Content, idm_request::Request, idm_response::Response, IdmEvent, IdmPacket,
|
||||
IdmRequest, IdmResponse,
|
||||
};
|
||||
use anyhow::{anyhow, Result};
|
||||
use bytes::BytesMut;
|
||||
use log::error;
|
||||
use log::{debug, error};
|
||||
use nix::sys::termios::{cfmakeraw, tcgetattr, tcsetattr, SetArg};
|
||||
use prost::Message;
|
||||
use tokio::{
|
||||
fs::File,
|
||||
io::{unix::AsyncFd, AsyncReadExt, AsyncWriteExt},
|
||||
select,
|
||||
sync::mpsc::{channel, Receiver, Sender},
|
||||
sync::{
|
||||
broadcast,
|
||||
mpsc::{channel, Receiver, Sender},
|
||||
oneshot, Mutex,
|
||||
},
|
||||
task::JoinHandle,
|
||||
time::timeout,
|
||||
};
|
||||
|
||||
type RequestMap = Arc<Mutex<HashMap<u64, oneshot::Sender<IdmResponse>>>>;
|
||||
|
||||
const IDM_PACKET_QUEUE_LEN: usize = 100;
|
||||
const IDM_REQUEST_TIMEOUT_SECS: u64 = 10;
|
||||
const IDM_PACKET_MAX_SIZE: usize = 20 * 1024 * 1024;
|
||||
|
||||
pub struct IdmClient {
|
||||
pub receiver: Receiver<IdmPacket>,
|
||||
pub sender: Sender<IdmPacket>,
|
||||
task: JoinHandle<()>,
|
||||
#[async_trait::async_trait]
|
||||
pub trait IdmBackend: Send {
|
||||
async fn recv(&mut self) -> Result<IdmPacket>;
|
||||
async fn send(&mut self, packet: IdmPacket) -> Result<()>;
|
||||
}
|
||||
|
||||
impl Drop for IdmClient {
|
||||
fn drop(&mut self) {
|
||||
self.task.abort();
|
||||
}
|
||||
pub struct IdmFileBackend {
|
||||
read_fd: Arc<Mutex<AsyncFd<File>>>,
|
||||
write: Arc<Mutex<File>>,
|
||||
}
|
||||
|
||||
impl IdmClient {
|
||||
pub async fn open<P: AsRef<Path>>(path: P) -> Result<IdmClient> {
|
||||
let file = File::options()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(false)
|
||||
.open(path)
|
||||
.await?;
|
||||
IdmClient::set_raw_port(&file)?;
|
||||
let (rx_sender, rx_receiver) = channel(IDM_PACKET_QUEUE_LEN);
|
||||
let (tx_sender, tx_receiver) = channel(IDM_PACKET_QUEUE_LEN);
|
||||
let task = tokio::task::spawn(async move {
|
||||
if let Err(error) = IdmClient::process(file, rx_sender, tx_receiver).await {
|
||||
error!("failed to handle idm client processing: {}", error);
|
||||
}
|
||||
});
|
||||
Ok(IdmClient {
|
||||
receiver: rx_receiver,
|
||||
sender: tx_sender,
|
||||
task,
|
||||
impl IdmFileBackend {
|
||||
pub async fn new(read_file: File, write_file: File) -> Result<IdmFileBackend> {
|
||||
IdmFileBackend::set_raw_port(&read_file)?;
|
||||
IdmFileBackend::set_raw_port(&write_file)?;
|
||||
Ok(IdmFileBackend {
|
||||
read_fd: Arc::new(Mutex::new(AsyncFd::new(read_file)?)),
|
||||
write: Arc::new(Mutex::new(write_file)),
|
||||
})
|
||||
}
|
||||
|
||||
@ -57,31 +62,208 @@ impl IdmClient {
|
||||
tcsetattr(file, SetArg::TCSANOW, &termios)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl IdmBackend for IdmFileBackend {
|
||||
async fn recv(&mut self) -> Result<IdmPacket> {
|
||||
let mut fd = self.read_fd.lock().await;
|
||||
let mut guard = fd.readable_mut().await?;
|
||||
let b1 = guard.get_inner_mut().read_u8().await?;
|
||||
if b1 != 0xff {
|
||||
return Ok(IdmPacket::default());
|
||||
}
|
||||
let b2 = guard.get_inner_mut().read_u8().await?;
|
||||
if b2 != 0xff {
|
||||
return Ok(IdmPacket::default());
|
||||
}
|
||||
let size = guard.get_inner_mut().read_u32_le().await?;
|
||||
if size == 0 {
|
||||
return Ok(IdmPacket::default());
|
||||
}
|
||||
let mut buffer = vec![0u8; size as usize];
|
||||
guard.get_inner_mut().read_exact(&mut buffer).await?;
|
||||
match IdmPacket::decode(buffer.as_slice()) {
|
||||
Ok(packet) => Ok(packet),
|
||||
Err(error) => Err(anyhow!("received invalid idm packet: {}", error)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn send(&mut self, packet: IdmPacket) -> Result<()> {
|
||||
let mut file = self.write.lock().await;
|
||||
let data = packet.encode_to_vec();
|
||||
file.write_all(&[0xff, 0xff]).await?;
|
||||
file.write_u32_le(data.len() as u32).await?;
|
||||
file.write_all(&data).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct IdmClient {
|
||||
request_backend_sender: broadcast::Sender<IdmRequest>,
|
||||
next_request_id: Arc<Mutex<u64>>,
|
||||
event_receiver_sender: broadcast::Sender<IdmEvent>,
|
||||
tx_sender: Sender<IdmPacket>,
|
||||
requests: RequestMap,
|
||||
task: Arc<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Drop for IdmClient {
|
||||
fn drop(&mut self) {
|
||||
if Arc::strong_count(&self.task) <= 1 {
|
||||
self.task.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IdmClient {
|
||||
pub async fn new(backend: Box<dyn IdmBackend>) -> Result<IdmClient> {
|
||||
let requests = Arc::new(Mutex::new(HashMap::new()));
|
||||
let (event_sender, event_receiver) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
|
||||
let (internal_request_backend_sender, _) = broadcast::channel(IDM_PACKET_QUEUE_LEN);
|
||||
let (tx_sender, tx_receiver) = channel(IDM_PACKET_QUEUE_LEN);
|
||||
let backend_event_sender = event_sender.clone();
|
||||
let request_backend_sender = internal_request_backend_sender.clone();
|
||||
let requests_for_client = requests.clone();
|
||||
let task = tokio::task::spawn(async move {
|
||||
if let Err(error) = IdmClient::process(
|
||||
backend,
|
||||
backend_event_sender,
|
||||
requests,
|
||||
internal_request_backend_sender,
|
||||
event_receiver,
|
||||
tx_receiver,
|
||||
)
|
||||
.await
|
||||
{
|
||||
debug!("failed to handle idm client processing: {}", error);
|
||||
}
|
||||
});
|
||||
Ok(IdmClient {
|
||||
next_request_id: Arc::new(Mutex::new(0)),
|
||||
event_receiver_sender: event_sender.clone(),
|
||||
request_backend_sender,
|
||||
requests: requests_for_client,
|
||||
tx_sender,
|
||||
task: Arc::new(task),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn open<P: AsRef<Path>>(path: P) -> Result<IdmClient> {
|
||||
let read_file = File::options()
|
||||
.read(true)
|
||||
.write(false)
|
||||
.create(false)
|
||||
.open(&path)
|
||||
.await?;
|
||||
let write_file = File::options()
|
||||
.read(false)
|
||||
.write(true)
|
||||
.create(false)
|
||||
.open(path)
|
||||
.await?;
|
||||
let backend = IdmFileBackend::new(read_file, write_file).await?;
|
||||
IdmClient::new(Box::new(backend) as Box<dyn IdmBackend>).await
|
||||
}
|
||||
|
||||
pub async fn emit(&self, event: IdmEvent) -> Result<()> {
|
||||
self.tx_sender
|
||||
.send(IdmPacket {
|
||||
content: Some(Content::Event(event)),
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn requests(&self) -> Result<broadcast::Receiver<IdmRequest>> {
|
||||
Ok(self.request_backend_sender.subscribe())
|
||||
}
|
||||
|
||||
pub async fn respond(&self, id: u64, response: Response) -> Result<()> {
|
||||
let packet = IdmPacket {
|
||||
content: Some(Content::Response(IdmResponse {
|
||||
id,
|
||||
response: Some(response),
|
||||
})),
|
||||
};
|
||||
self.tx_sender.send(packet).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn subscribe(&self) -> Result<broadcast::Receiver<IdmEvent>> {
|
||||
Ok(self.event_receiver_sender.subscribe())
|
||||
}
|
||||
|
||||
pub async fn send(&self, request: Request) -> Result<Response> {
|
||||
let (sender, receiver) = oneshot::channel::<IdmResponse>();
|
||||
let req = {
|
||||
let mut guard = self.next_request_id.lock().await;
|
||||
let req = *guard;
|
||||
*guard = req.wrapping_add(1);
|
||||
req
|
||||
};
|
||||
let mut requests = self.requests.lock().await;
|
||||
requests.insert(req, sender);
|
||||
drop(requests);
|
||||
let success = AtomicBool::new(false);
|
||||
let _guard = scopeguard::guard(self.requests.clone(), |requests| {
|
||||
if success.load(Ordering::Acquire) {
|
||||
return;
|
||||
}
|
||||
tokio::task::spawn(async move {
|
||||
let mut requests = requests.lock().await;
|
||||
requests.remove(&req);
|
||||
});
|
||||
});
|
||||
self.tx_sender
|
||||
.send(IdmPacket {
|
||||
content: Some(Content::Request(IdmRequest {
|
||||
id: req,
|
||||
request: Some(request),
|
||||
})),
|
||||
})
|
||||
.await?;
|
||||
|
||||
let response = timeout(Duration::from_secs(IDM_REQUEST_TIMEOUT_SECS), receiver).await??;
|
||||
success.store(true, Ordering::Release);
|
||||
if let Some(response) = response.response {
|
||||
Ok(response)
|
||||
} else {
|
||||
Err(anyhow!("response did not contain any content"))
|
||||
}
|
||||
}
|
||||
|
||||
async fn process(
|
||||
file: File,
|
||||
sender: Sender<IdmPacket>,
|
||||
mut backend: Box<dyn IdmBackend>,
|
||||
event_sender: broadcast::Sender<IdmEvent>,
|
||||
requests: RequestMap,
|
||||
request_backend_sender: broadcast::Sender<IdmRequest>,
|
||||
_event_receiver: broadcast::Receiver<IdmEvent>,
|
||||
mut receiver: Receiver<IdmPacket>,
|
||||
) -> Result<()> {
|
||||
let mut file = AsyncFd::new(file)?;
|
||||
loop {
|
||||
select! {
|
||||
x = file.readable_mut() => match x {
|
||||
Ok(mut guard) => {
|
||||
let size = guard.get_inner_mut().read_u16_le().await?;
|
||||
if size == 0 {
|
||||
continue;
|
||||
}
|
||||
let mut buffer = BytesMut::with_capacity(size as usize);
|
||||
guard.get_inner_mut().read_exact(&mut buffer).await?;
|
||||
match IdmPacket::decode(buffer) {
|
||||
Ok(packet) => {
|
||||
sender.send(packet).await?;
|
||||
x = backend.recv() => match x {
|
||||
Ok(packet) => {
|
||||
match packet.content {
|
||||
Some(Content::Event(event)) => {
|
||||
let _ = event_sender.send(event);
|
||||
},
|
||||
|
||||
Err(error) => {
|
||||
error!("received invalid idm packet: {}", error);
|
||||
}
|
||||
Some(Content::Request(request)) => {
|
||||
let _ = request_backend_sender.send(request);
|
||||
},
|
||||
|
||||
Some(Content::Response(response)) => {
|
||||
let mut requests = requests.lock().await;
|
||||
if let Some(sender) = requests.remove(&response.id) {
|
||||
drop(requests);
|
||||
let _ = sender.send(response);
|
||||
}
|
||||
},
|
||||
|
||||
_ => {},
|
||||
}
|
||||
},
|
||||
|
||||
@ -91,13 +273,12 @@ impl IdmClient {
|
||||
},
|
||||
x = receiver.recv() => match x {
|
||||
Some(packet) => {
|
||||
let data = packet.encode_to_vec();
|
||||
if data.len() > u16::MAX as usize {
|
||||
error!("unable to send idm packet, packet size exceeded (tried to send {} bytes)", data.len());
|
||||
let length = packet.encoded_len();
|
||||
if length > IDM_PACKET_MAX_SIZE {
|
||||
error!("unable to send idm packet, packet size exceeded (tried to send {} bytes)", length);
|
||||
continue;
|
||||
}
|
||||
file.get_mut().write_u16_le(data.len() as u16).await?;
|
||||
file.get_mut().write_all(&data).await?;
|
||||
backend.send(packet).await?;
|
||||
},
|
||||
|
||||
None => {
|
||||
|
@@ -1,3 +1,3 @@
#[cfg(unix)]
pub mod client;
pub mod protocol;
pub use crate::bus::idm as protocol;

@@ -1 +0,0 @@
include!(concat!(env!("OUT_DIR"), "/krata.internal.idm.rs"));
@@ -2,24 +2,30 @@ use std::collections::HashMap;

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum LaunchPackedFormat {
    Squashfs,
    Erofs,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LaunchNetworkIpv4 {
    pub address: String,
    pub gateway: String,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LaunchNetworkIpv6 {
    pub address: String,
    pub gateway: String,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LaunchNetworkResolver {
    pub nameservers: Vec<String>,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LaunchNetwork {
    pub link: String,
    pub ipv4: LaunchNetworkIpv4,
@@ -27,8 +33,15 @@ pub struct LaunchNetwork {
    pub resolver: LaunchNetworkResolver,
}

#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LaunchRoot {
    pub format: LaunchPackedFormat,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LaunchInfo {
    pub root: LaunchRoot,
    pub hostname: Option<String>,
    pub network: Option<LaunchNetwork>,
    pub env: HashMap<String, String>,
    pub run: Option<Vec<String>>,

@@ -1,6 +1,7 @@
use once_cell::sync::Lazy;
use prost_reflect::DescriptorPool;

pub mod bus;
pub mod v1;

pub mod client;

@@ -1 +1,2 @@
#![allow(clippy::all)]
tonic::include_proto!("krata.v1.common");

@@ -1 +1,2 @@
#![allow(clippy::all)]
tonic::include_proto!("krata.v1.control");

@@ -2,7 +2,7 @@
name = "krata-network"
description = "Networking services for the krata hypervisor."
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
@@ -16,7 +16,7 @@ clap = { workspace = true }
env_logger = { workspace = true }
etherparse = { workspace = true }
futures = { workspace = true }
krata = { path = "../krata", version = "^0.0.3" }
krata = { path = "../krata", version = "^0.0.9" }
krata-advmac = { workspace = true }
libc = { workspace = true }
log = { workspace = true }

@@ -5,7 +5,7 @@ use krata::{
        common::Guest,
        control::{
            control_service_client::ControlServiceClient, watch_events_reply::Event,
            ListGuestsRequest, WatchEventsRequest,
            ListGuestsRequest,
        },
    },
};
@@ -50,12 +50,11 @@ pub struct AutoNetworkChangeset {
}

impl AutoNetworkWatcher {
    pub async fn new(mut control: ControlServiceClient<Channel>) -> Result<AutoNetworkWatcher> {
        let watch_events_response = control.watch_events(WatchEventsRequest {}).await?;

    pub async fn new(control: ControlServiceClient<Channel>) -> Result<AutoNetworkWatcher> {
        let client = control.clone();
        Ok(AutoNetworkWatcher {
            control,
            events: EventStream::open(watch_events_response.into_inner()).await?,
            events: EventStream::open(client).await?,
            known: HashMap::new(),
        })
    }
@@ -136,7 +135,15 @@ impl AutoNetworkWatcher {
        let mut added: Vec<NetworkMetadata> = Vec::new();
        let mut removed: Vec<NetworkMetadata> = Vec::new();

        for network in self.read().await? {
        let networks = match self.read().await {
            Ok(networks) => networks,
            Err(error) => {
                warn!("failed to read network changes: {}", error);
                return Ok(AutoNetworkChangeset { added, removed });
            }
        };

        for network in networks {
            seen.push(network.uuid);
            if self.known.contains_key(&network.uuid) {
                continue;
@@ -2,7 +2,7 @@
name = "krata-oci"
description = "OCI services for the krata hypervisor."
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
@@ -14,11 +14,13 @@ async-compression = { workspace = true, features = ["tokio", "gzip", "zstd"] }
async-trait = { workspace = true }
backhand = { workspace = true }
bytes = { workspace = true }
indexmap = { workspace = true }
krata-tokio-tar = { workspace = true }
log = { workspace = true }
oci-spec = { workspace = true }
path-clean = { workspace = true }
reqwest = { workspace = true }
scopeguard = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sha256 = { workspace = true }
@@ -2,8 +2,13 @@ use std::{env::args, path::PathBuf};

use anyhow::Result;
use env_logger::Env;
use krataoci::{cache::ImageCache, compiler::ImageCompiler, name::ImageName};
use tokio::fs;
use krataoci::{
    name::ImageName,
    packer::{service::OciPackerService, OciPackedFormat},
    progress::{OciProgress, OciProgressContext},
    registry::OciPlatform,
};
use tokio::{fs, sync::mpsc::channel};

#[tokio::main]
async fn main() -> Result<()> {
@@ -17,13 +22,32 @@ async fn main() -> Result<()> {
        fs::create_dir(&cache_dir).await?;
    }

    let cache = ImageCache::new(&cache_dir)?;
    let compiler = ImageCompiler::new(&cache, seed)?;
    let info = compiler.compile(&image).await?;
    let (sender, mut receiver) = channel::<OciProgress>(100);
    tokio::task::spawn(async move {
        loop {
            let mut progresses = Vec::new();
            let _ = receiver.recv_many(&mut progresses, 100).await;
            let Some(progress) = progresses.last() else {
                continue;
            };
            println!("phase {:?}", progress.phase);
            for (id, layer) in &progress.layers {
                println!(
                    "{} {:?} {} of {}",
                    id, layer.phase, layer.value, layer.total
                )
            }
        }
    });
    let context = OciProgressContext::new(sender);
    let service = OciPackerService::new(seed, &cache_dir, OciPlatform::current())?;
    let packed = service
        .request(image.clone(), OciPackedFormat::Squashfs, context)
        .await?;
    println!(
        "generated squashfs of {} to {}",
        image,
        info.image_squashfs.to_string_lossy()
        packed.path.to_string_lossy()
    );
    Ok(())
}
239 crates/oci/src/assemble.rs Normal file
@@ -0,0 +1,239 @@
|
||||
use crate::fetch::{OciImageFetcher, OciImageLayer, OciResolvedImage};
|
||||
use crate::progress::OciBoundProgress;
|
||||
use crate::vfs::{VfsNode, VfsTree};
|
||||
use anyhow::{anyhow, Result};
|
||||
use log::{debug, trace, warn};
|
||||
use oci_spec::image::{ImageConfiguration, ImageManifest};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use tokio::fs;
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio_stream::StreamExt;
|
||||
use tokio_tar::{Archive, Entry};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub struct OciImageAssembled {
|
||||
pub digest: String,
|
||||
pub manifest: ImageManifest,
|
||||
pub config: ImageConfiguration,
|
||||
pub vfs: Arc<VfsTree>,
|
||||
pub tmp_dir: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl Drop for OciImageAssembled {
|
||||
fn drop(&mut self) {
|
||||
if let Some(tmp) = self.tmp_dir.clone() {
|
||||
tokio::task::spawn(async move {
|
||||
let _ = fs::remove_dir_all(&tmp).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OciImageAssembler {
|
||||
downloader: OciImageFetcher,
|
||||
resolved: OciResolvedImage,
|
||||
progress: OciBoundProgress,
|
||||
work_dir: PathBuf,
|
||||
disk_dir: PathBuf,
|
||||
tmp_dir: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl OciImageAssembler {
|
||||
pub async fn new(
|
||||
downloader: OciImageFetcher,
|
||||
resolved: OciResolvedImage,
|
||||
progress: OciBoundProgress,
|
||||
work_dir: Option<PathBuf>,
|
||||
disk_dir: Option<PathBuf>,
|
||||
) -> Result<OciImageAssembler> {
|
||||
let tmp_dir = if work_dir.is_none() || disk_dir.is_none() {
|
||||
let mut tmp_dir = std::env::temp_dir().clone();
|
||||
tmp_dir.push(format!("oci-assemble-{}", Uuid::new_v4()));
|
||||
Some(tmp_dir)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let work_dir = if let Some(work_dir) = work_dir {
|
||||
work_dir
|
||||
} else {
|
||||
let mut tmp_dir = tmp_dir
|
||||
.clone()
|
||||
.ok_or(anyhow!("tmp_dir was not created when expected"))?;
|
||||
tmp_dir.push("work");
|
||||
tmp_dir
|
||||
};
|
||||
|
||||
let target_dir = if let Some(target_dir) = disk_dir {
|
||||
target_dir
|
||||
} else {
|
||||
let mut tmp_dir = tmp_dir
|
||||
.clone()
|
||||
.ok_or(anyhow!("tmp_dir was not created when expected"))?;
|
||||
tmp_dir.push("image");
|
||||
tmp_dir
|
||||
};
|
||||
|
||||
fs::create_dir_all(&work_dir).await?;
|
||||
fs::create_dir_all(&target_dir).await?;
|
||||
|
||||
Ok(OciImageAssembler {
|
||||
downloader,
|
||||
resolved,
|
||||
progress,
|
||||
work_dir,
|
||||
disk_dir: target_dir,
|
||||
tmp_dir,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn assemble(self) -> Result<OciImageAssembled> {
|
||||
debug!("assemble");
|
||||
let mut layer_dir = self.work_dir.clone();
|
||||
layer_dir.push("layer");
|
||||
fs::create_dir_all(&layer_dir).await?;
|
||||
self.assemble_with(&layer_dir).await
|
||||
}
|
||||
|
||||
async fn assemble_with(self, layer_dir: &Path) -> Result<OciImageAssembled> {
|
||||
let local = self
|
||||
.downloader
|
||||
.download(self.resolved.clone(), layer_dir)
|
||||
.await?;
|
||||
let mut vfs = VfsTree::new();
|
||||
for layer in &local.layers {
|
||||
debug!(
|
||||
"process layer digest={} compression={:?}",
|
||||
&layer.digest, layer.compression,
|
||||
);
|
||||
self.progress
|
||||
.update(|progress| {
|
||||
progress.extracting_layer(&layer.digest, 0, 1);
|
||||
})
|
||||
.await;
|
||||
debug!("process layer digest={}", &layer.digest,);
|
||||
let mut archive = layer.archive().await?;
|
||||
let mut entries = archive.entries()?;
|
||||
while let Some(entry) = entries.next().await {
|
||||
let mut entry = entry?;
|
||||
let path = entry.path()?;
|
||||
let Some(name) = path.file_name() else {
|
||||
continue;
|
||||
};
|
||||
let Some(name) = name.to_str() else {
|
||||
continue;
|
||||
};
|
||||
if name.starts_with(".wh.") {
|
||||
self.process_whiteout_entry(&mut vfs, &entry, name, layer)
|
||||
.await?;
|
||||
} else {
|
||||
vfs.insert_tar_entry(&entry)?;
|
||||
self.process_write_entry(&mut vfs, &mut entry, layer)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
self.progress
|
||||
.update(|progress| {
|
||||
progress.extracted_layer(&layer.digest);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
for layer in &local.layers {
|
||||
if layer.path.exists() {
|
||||
fs::remove_file(&layer.path).await?;
|
||||
}
|
||||
}
|
||||
Ok(OciImageAssembled {
|
||||
vfs: Arc::new(vfs),
|
||||
digest: self.resolved.digest,
|
||||
manifest: self.resolved.manifest,
|
||||
config: local.config,
|
||||
tmp_dir: self.tmp_dir,
|
||||
})
|
||||
}
|
||||
|
||||
async fn process_whiteout_entry(
|
||||
&self,
|
||||
vfs: &mut VfsTree,
|
||||
entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
|
||||
name: &str,
|
||||
layer: &OciImageLayer,
|
||||
) -> Result<()> {
|
||||
let path = entry.path()?;
|
||||
let mut path = path.to_path_buf();
|
||||
path.pop();
|
||||
|
||||
let opaque = name == ".wh..wh..opq";
|
||||
|
||||
if !opaque {
|
||||
let file = &name[4..];
|
||||
path.push(file);
|
||||
}
|
||||
|
||||
trace!(
|
||||
"whiteout entry {:?} layer={} path={:?}",
|
||||
entry.path()?,
|
||||
&layer.digest,
|
||||
path
|
||||
);
|
||||
|
||||
let result = vfs.root.remove(&path);
|
||||
if let Some((parent, mut removed)) = result {
|
||||
delete_disk_paths(&removed).await?;
|
||||
if opaque {
|
||||
removed.children.clear();
|
||||
parent.children.push(removed);
|
||||
}
|
||||
} else {
|
||||
warn!(
|
||||
"whiteout entry layer={} path={:?} did not exist",
|
||||
&layer.digest, path
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn process_write_entry(
|
||||
&self,
|
||||
vfs: &mut VfsTree,
|
||||
entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
|
||||
layer: &OciImageLayer,
|
||||
) -> Result<()> {
|
||||
if !entry.header().entry_type().is_file() {
|
||||
return Ok(());
|
||||
}
|
||||
trace!(
|
||||
"unpack entry layer={} path={:?} type={:?}",
|
||||
&layer.digest,
|
||||
entry.path()?,
|
||||
entry.header().entry_type(),
|
||||
);
|
||||
entry.set_preserve_permissions(false);
|
||||
entry.set_unpack_xattrs(false);
|
||||
entry.set_preserve_mtime(false);
|
||||
let path = entry
|
||||
.unpack_in(&self.disk_dir)
|
||||
.await?
|
||||
.ok_or(anyhow!("unpack did not return a path"))?;
|
||||
vfs.set_disk_path(&entry.path()?, &path)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_disk_paths(node: &VfsNode) -> Result<()> {
|
||||
let mut queue = vec![node];
|
||||
while !queue.is_empty() {
|
||||
let node = queue.remove(0);
|
||||
if let Some(ref disk_path) = node.disk_path {
|
||||
if !disk_path.exists() {
|
||||
warn!("disk path {:?} does not exist", disk_path);
|
||||
}
|
||||
fs::remove_file(disk_path).await?;
|
||||
}
|
||||
let children = node.children.iter().collect::<Vec<_>>();
|
||||
queue.extend_from_slice(&children);
|
||||
}
|
||||
Ok(())
|
||||
}
|
@ -1,70 +0,0 @@
|
||||
use super::compiler::ImageInfo;
|
||||
use anyhow::Result;
|
||||
use log::debug;
|
||||
use oci_spec::image::{ImageConfiguration, ImageManifest};
|
||||
use std::path::{Path, PathBuf};
|
||||
use tokio::fs;
|
||||
|
||||
pub struct ImageCache {
|
||||
cache_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl ImageCache {
|
||||
pub fn new(cache_dir: &Path) -> Result<ImageCache> {
|
||||
Ok(ImageCache {
|
||||
cache_dir: cache_dir.to_path_buf(),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn recall(&self, digest: &str) -> Result<Option<ImageInfo>> {
|
||||
let mut squashfs_path = self.cache_dir.clone();
|
||||
let mut config_path = self.cache_dir.clone();
|
||||
let mut manifest_path = self.cache_dir.clone();
|
||||
squashfs_path.push(format!("{}.squashfs", digest));
|
||||
manifest_path.push(format!("{}.manifest.json", digest));
|
||||
config_path.push(format!("{}.config.json", digest));
|
||||
Ok(
|
||||
if squashfs_path.exists() && manifest_path.exists() && config_path.exists() {
|
||||
let squashfs_metadata = fs::metadata(&squashfs_path).await?;
|
||||
let manifest_metadata = fs::metadata(&manifest_path).await?;
|
||||
let config_metadata = fs::metadata(&config_path).await?;
|
||||
if squashfs_metadata.is_file()
|
||||
&& manifest_metadata.is_file()
|
||||
&& config_metadata.is_file()
|
||||
{
|
||||
let manifest_text = fs::read_to_string(&manifest_path).await?;
|
||||
let manifest: ImageManifest = serde_json::from_str(&manifest_text)?;
|
||||
let config_text = fs::read_to_string(&config_path).await?;
|
||||
let config: ImageConfiguration = serde_json::from_str(&config_text)?;
|
||||
debug!("cache hit digest={}", digest);
|
||||
Some(ImageInfo::new(squashfs_path.clone(), manifest, config)?)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
debug!("cache miss digest={}", digest);
|
||||
None
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn store(&self, digest: &str, info: &ImageInfo) -> Result<ImageInfo> {
|
||||
debug!("cache store digest={}", digest);
|
||||
let mut squashfs_path = self.cache_dir.clone();
|
||||
let mut manifest_path = self.cache_dir.clone();
|
||||
let mut config_path = self.cache_dir.clone();
|
||||
squashfs_path.push(format!("{}.squashfs", digest));
|
||||
manifest_path.push(format!("{}.manifest.json", digest));
|
||||
config_path.push(format!("{}.config.json", digest));
|
||||
fs::copy(&info.image_squashfs, &squashfs_path).await?;
|
||||
let manifest_text = serde_json::to_string_pretty(&info.manifest)?;
|
||||
fs::write(&manifest_path, manifest_text).await?;
|
||||
let config_text = serde_json::to_string_pretty(&info.config)?;
|
||||
fs::write(&config_path, config_text).await?;
|
||||
ImageInfo::new(
|
||||
squashfs_path.clone(),
|
||||
info.manifest.clone(),
|
||||
info.config.clone(),
|
||||
)
|
||||
}
|
||||
}
|
@ -1,411 +0,0 @@
|
||||
use crate::cache::ImageCache;
|
||||
use crate::fetch::{OciImageDownloader, OciImageLayer};
|
||||
use crate::name::ImageName;
|
||||
use crate::registry::OciRegistryPlatform;
|
||||
use anyhow::{anyhow, Result};
|
||||
use backhand::compression::Compressor;
|
||||
use backhand::{FilesystemCompressor, FilesystemWriter, NodeHeader};
|
||||
use log::{debug, trace, warn};
|
||||
use oci_spec::image::{ImageConfiguration, ImageManifest};
|
||||
use std::borrow::Cow;
|
||||
use std::fs::File;
|
||||
use std::io::{BufWriter, ErrorKind, Read};
|
||||
use std::os::unix::fs::{FileTypeExt, MetadataExt, PermissionsExt};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::pin::Pin;
|
||||
use tokio::fs;
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio_stream::StreamExt;
|
||||
use tokio_tar::{Archive, Entry};
|
||||
use uuid::Uuid;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
pub const IMAGE_SQUASHFS_VERSION: u64 = 2;
|
||||
|
||||
pub struct ImageInfo {
|
||||
pub image_squashfs: PathBuf,
|
||||
pub manifest: ImageManifest,
|
||||
pub config: ImageConfiguration,
|
||||
}
|
||||
|
||||
impl ImageInfo {
|
||||
pub fn new(
|
||||
squashfs: PathBuf,
|
||||
manifest: ImageManifest,
|
||||
config: ImageConfiguration,
|
||||
) -> Result<ImageInfo> {
|
||||
Ok(ImageInfo {
|
||||
image_squashfs: squashfs,
|
||||
manifest,
|
||||
config,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ImageCompiler<'a> {
|
||||
cache: &'a ImageCache,
|
||||
seed: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl ImageCompiler<'_> {
|
||||
pub fn new(cache: &ImageCache, seed: Option<PathBuf>) -> Result<ImageCompiler> {
|
||||
Ok(ImageCompiler { cache, seed })
|
||||
}
|
||||
|
||||
pub async fn compile(&self, image: &ImageName) -> Result<ImageInfo> {
|
||||
debug!("compile image={image}");
|
||||
let mut tmp_dir = std::env::temp_dir().clone();
|
||||
tmp_dir.push(format!("krata-compile-{}", Uuid::new_v4()));
|
||||
|
||||
let mut image_dir = tmp_dir.clone();
|
||||
image_dir.push("image");
|
||||
fs::create_dir_all(&image_dir).await?;
|
||||
|
||||
let mut layer_dir = tmp_dir.clone();
|
||||
layer_dir.push("layer");
|
||||
fs::create_dir_all(&layer_dir).await?;
|
||||
|
||||
let mut squash_file = tmp_dir.clone();
|
||||
squash_file.push("image.squashfs");
|
||||
let info = self
|
||||
.download_and_compile(image, &layer_dir, &image_dir, &squash_file)
|
||||
.await?;
|
||||
fs::remove_dir_all(&tmp_dir).await?;
|
||||
Ok(info)
|
||||
}
|
||||
|
||||
async fn download_and_compile(
|
||||
&self,
|
||||
image: &ImageName,
|
||||
layer_dir: &Path,
|
||||
image_dir: &Path,
|
||||
squash_file: &Path,
|
||||
) -> Result<ImageInfo> {
|
||||
let downloader = OciImageDownloader::new(
|
||||
self.seed.clone(),
|
||||
layer_dir.to_path_buf(),
|
||||
OciRegistryPlatform::current(),
|
||||
);
|
||||
let resolved = downloader.resolve(image.clone()).await?;
|
||||
let cache_key = format!(
|
||||
"manifest={}:squashfs-version={}\n",
|
||||
resolved.digest, IMAGE_SQUASHFS_VERSION
|
||||
);
|
||||
let cache_digest = sha256::digest(cache_key);
|
||||
|
||||
if let Some(cached) = self.cache.recall(&cache_digest).await? {
|
||||
return Ok(cached);
|
||||
}
|
||||
|
||||
let local = downloader.download(resolved).await?;
|
||||
for layer in &local.layers {
|
||||
debug!(
|
||||
"process layer digest={} compression={:?}",
|
||||
&layer.digest, layer.compression,
|
||||
);
|
||||
let whiteouts = self.process_layer_whiteout(layer, image_dir).await?;
|
||||
debug!(
|
||||
"process layer digest={} whiteouts={:?}",
|
||||
&layer.digest, whiteouts
|
||||
);
|
||||
let mut archive = layer.archive().await?;
|
||||
let mut entries = archive.entries()?;
|
||||
while let Some(entry) = entries.next().await {
|
||||
let mut entry = entry?;
|
||||
let path = entry.path()?;
|
||||
let mut maybe_whiteout_path_str =
|
||||
path.to_str().map(|x| x.to_string()).unwrap_or_default();
|
||||
if whiteouts.contains(&maybe_whiteout_path_str) {
|
||||
continue;
|
||||
}
|
||||
maybe_whiteout_path_str.push('/');
|
||||
if whiteouts.contains(&maybe_whiteout_path_str) {
|
||||
continue;
|
||||
}
|
||||
let Some(name) = path.file_name() else {
|
||||
return Err(anyhow!("unable to get file name"));
|
||||
};
|
||||
let Some(name) = name.to_str() else {
|
||||
return Err(anyhow!("unable to get file name as string"));
|
||||
};
|
||||
|
||||
if name.starts_with(".wh.") {
|
||||
continue;
|
||||
} else {
|
||||
self.process_write_entry(&mut entry, layer, image_dir)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for layer in &local.layers {
|
||||
if layer.path.exists() {
|
||||
fs::remove_file(&layer.path).await?;
|
||||
}
|
||||
}
|
||||
|
||||
self.squash(image_dir, squash_file)?;
|
||||
let info = ImageInfo::new(
|
||||
squash_file.to_path_buf(),
|
||||
local.image.manifest,
|
||||
local.config,
|
||||
)?;
|
||||
self.cache.store(&cache_digest, &info).await
|
||||
}
|
||||
|
||||
async fn process_layer_whiteout(
|
||||
&self,
|
||||
layer: &OciImageLayer,
|
||||
image_dir: &Path,
|
||||
) -> Result<Vec<String>> {
|
||||
let mut whiteouts = Vec::new();
|
||||
let mut archive = layer.archive().await?;
|
||||
let mut entries = archive.entries()?;
|
||||
while let Some(entry) = entries.next().await {
|
||||
let entry = entry?;
|
||||
let path = entry.path()?;
|
||||
let Some(name) = path.file_name() else {
|
||||
return Err(anyhow!("unable to get file name"));
|
||||
};
|
||||
let Some(name) = name.to_str() else {
|
||||
return Err(anyhow!("unable to get file name as string"));
|
||||
};
|
||||
|
||||
if name.starts_with(".wh.") {
|
||||
let path = self
|
||||
.process_whiteout_entry(&entry, name, layer, image_dir)
|
||||
.await?;
|
||||
if let Some(path) = path {
|
||||
whiteouts.push(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(whiteouts)
|
||||
}
|
||||
|
||||
async fn process_whiteout_entry(
|
||||
&self,
|
||||
entry: &Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
|
||||
name: &str,
|
||||
layer: &OciImageLayer,
|
||||
image_dir: &Path,
|
||||
) -> Result<Option<String>> {
|
||||
let path = entry.path()?;
|
||||
let mut dst = self.check_safe_entry(path.clone(), image_dir)?;
|
||||
dst.pop();
|
||||
let mut path = path.to_path_buf();
|
||||
path.pop();
|
||||
|
||||
let opaque = name == ".wh..wh..opq";
|
||||
|
||||
if !opaque {
|
||||
let file = &name[4..];
|
||||
dst.push(file);
|
||||
path.push(file);
|
||||
self.check_safe_path(&dst, image_dir)?;
|
||||
}
|
||||
|
||||
trace!("whiteout entry layer={} path={:?}", &layer.digest, path,);
|
||||
|
||||
let whiteout = path
|
||||
.to_str()
|
||||
.ok_or(anyhow!("unable to convert path to string"))?
|
||||
.to_string();
|
||||
|
||||
if opaque {
|
||||
if dst.is_dir() {
|
||||
let mut reader = fs::read_dir(dst).await?;
|
||||
while let Some(entry) = reader.next_entry().await? {
|
||||
let path = entry.path();
|
||||
if path.is_symlink() || path.is_file() {
|
||||
fs::remove_file(&path).await?;
|
||||
} else if path.is_dir() {
|
||||
fs::remove_dir_all(&path).await?;
|
||||
} else {
|
||||
return Err(anyhow!("opaque whiteout entry did not exist"));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
debug!(
|
||||
"whiteout opaque entry missing locally layer={} path={:?} local={:?}",
|
||||
&layer.digest,
|
||||
entry.path()?,
|
||||
dst,
|
||||
);
|
||||
}
|
||||
} else if dst.is_file() || dst.is_symlink() {
|
||||
fs::remove_file(&dst).await?;
|
||||
} else if dst.is_dir() {
|
||||
fs::remove_dir_all(&dst).await?;
|
||||
} else {
|
||||
debug!(
|
||||
"whiteout entry missing locally layer={} path={:?} local={:?}",
|
||||
&layer.digest,
|
||||
entry.path()?,
|
||||
dst,
|
||||
);
|
||||
}
|
||||
Ok(if opaque { None } else { Some(whiteout) })
|
||||
}
|
||||
|
||||
async fn process_write_entry(
|
||||
&self,
|
||||
entry: &mut Entry<Archive<Pin<Box<dyn AsyncRead + Send>>>>,
|
||||
layer: &OciImageLayer,
|
||||
image_dir: &Path,
|
||||
) -> Result<()> {
|
||||
let uid = entry.header().uid()?;
|
||||
let gid = entry.header().gid()?;
|
||||
trace!(
|
||||
"unpack entry layer={} path={:?} type={:?} uid={} gid={}",
|
||||
&layer.digest,
|
||||
entry.path()?,
|
||||
entry.header().entry_type(),
|
||||
uid,
|
||||
gid,
|
||||
);
|
||||
entry.set_preserve_mtime(true);
|
||||
entry.set_preserve_permissions(true);
|
||||
entry.set_unpack_xattrs(true);
|
||||
if let Some(path) = entry.unpack_in(image_dir).await? {
|
||||
if !path.is_symlink() {
|
||||
std::os::unix::fs::chown(path, Some(uid as u32), Some(gid as u32))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_safe_entry(&self, path: Cow<Path>, image_dir: &Path) -> Result<PathBuf> {
|
||||
let mut dst = image_dir.to_path_buf();
|
||||
dst.push(path);
|
||||
if let Some(name) = dst.file_name() {
|
||||
if let Some(name) = name.to_str() {
|
||||
if name.starts_with(".wh.") {
|
||||
let copy = dst.clone();
|
||||
dst.pop();
|
||||
self.check_safe_path(&dst, image_dir)?;
|
||||
return Ok(copy);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.check_safe_path(&dst, image_dir)?;
|
||||
Ok(dst)
|
||||
}
|
||||
|
||||
fn check_safe_path(&self, dst: &Path, image_dir: &Path) -> Result<()> {
|
||||
let resolved = path_clean::clean(dst);
|
||||
if !resolved.starts_with(image_dir) {
|
||||
return Err(anyhow!("layer attempts to work outside image dir"));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn squash(&self, image_dir: &Path, squash_file: &Path) -> Result<()> {
|
||||
let mut writer = FilesystemWriter::default();
|
||||
writer.set_compressor(FilesystemCompressor::new(Compressor::Gzip, None)?);
|
||||
let walk = WalkDir::new(image_dir).follow_links(false);
|
||||
for entry in walk {
|
||||
let entry = entry?;
|
||||
let rel = entry
|
||||
.path()
|
||||
.strip_prefix(image_dir)?
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("failed to strip prefix of tmpdir"))?;
|
||||
let rel = format!("/{}", rel);
|
||||
trace!("squash write {}", rel);
|
||||
let typ = entry.file_type();
|
||||
let metadata = std::fs::symlink_metadata(entry.path())?;
|
||||
let uid = metadata.uid();
|
||||
let gid = metadata.gid();
|
||||
let mode = metadata.permissions().mode();
|
||||
let mtime = metadata.mtime();
|
||||
|
||||
if rel == "/" {
|
||||
writer.set_root_uid(uid);
|
||||
writer.set_root_gid(gid);
|
||||
writer.set_root_mode(mode as u16);
|
||||
continue;
|
||||
}
|
||||
|
||||
let header = NodeHeader {
|
||||
permissions: mode as u16,
|
||||
uid,
|
||||
gid,
|
||||
mtime: mtime as u32,
|
||||
};
|
||||
if typ.is_symlink() {
|
||||
let symlink = std::fs::read_link(entry.path())?;
|
||||
let symlink = symlink
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("failed to read symlink"))?;
|
||||
writer.push_symlink(symlink, rel, header)?;
|
||||
} else if typ.is_dir() {
|
||||
writer.push_dir(rel, header)?;
|
||||
} else if typ.is_file() {
|
||||
writer.push_file(ConsumingFileReader::new(entry.path()), rel, header)?;
|
||||
} else if typ.is_block_device() {
|
||||
let device = metadata.dev();
|
||||
writer.push_block_device(device as u32, rel, header)?;
|
||||
} else if typ.is_char_device() {
|
||||
let device = metadata.dev();
|
||||
writer.push_char_device(device as u32, rel, header)?;
|
||||
} else if typ.is_fifo() {
|
||||
writer.push_fifo(rel, header)?;
|
||||
} else if typ.is_socket() {
|
||||
writer.push_socket(rel, header)?;
|
||||
} else {
|
||||
return Err(anyhow!("invalid file type"));
|
||||
}
|
||||
}
|
||||
|
||||
let squash_file_path = squash_file
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("failed to convert squashfs string"))?;
|
||||
|
||||
let file = File::create(squash_file)?;
|
||||
let mut bufwrite = BufWriter::new(file);
|
||||
trace!("squash generate: {}", squash_file_path);
|
||||
writer.write(&mut bufwrite)?;
|
||||
std::fs::remove_dir_all(image_dir)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ConsumingFileReader {
|
||||
path: PathBuf,
|
||||
file: Option<File>,
|
||||
}
|
||||
|
||||
impl ConsumingFileReader {
|
||||
fn new(path: &Path) -> ConsumingFileReader {
|
||||
ConsumingFileReader {
|
||||
path: path.to_path_buf(),
|
||||
file: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Read for ConsumingFileReader {
|
||||
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
|
||||
if self.file.is_none() {
|
||||
self.file = Some(File::open(&self.path)?);
|
||||
}
|
||||
let Some(ref mut file) = self.file else {
|
||||
return Err(std::io::Error::new(
|
||||
ErrorKind::NotFound,
|
||||
"file was not opened",
|
||||
));
|
||||
};
|
||||
file.read(buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for ConsumingFileReader {
|
||||
fn drop(&mut self) {
|
||||
let file = self.file.take();
|
||||
drop(file);
|
||||
if let Err(error) = std::fs::remove_file(&self.path) {
|
||||
warn!("failed to delete consuming file {:?}: {}", self.path, error);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,6 +1,8 @@
|
||||
use crate::progress::{OciBoundProgress, OciProgressPhase};
|
||||
|
||||
use super::{
|
||||
name::ImageName,
|
||||
registry::{OciRegistryClient, OciRegistryPlatform},
|
||||
registry::{OciPlatform, OciRegistryClient},
|
||||
};
|
||||
|
||||
use std::{
|
||||
@ -22,10 +24,10 @@ use tokio::{
|
||||
use tokio_stream::StreamExt;
|
||||
use tokio_tar::Archive;
|
||||
|
||||
pub struct OciImageDownloader {
|
||||
pub struct OciImageFetcher {
|
||||
seed: Option<PathBuf>,
|
||||
storage: PathBuf,
|
||||
platform: OciRegistryPlatform,
|
||||
platform: OciPlatform,
|
||||
progress: OciBoundProgress,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
@ -74,16 +76,16 @@ pub struct OciLocalImage {
|
||||
pub layers: Vec<OciImageLayer>,
|
||||
}
|
||||
|
||||
impl OciImageDownloader {
|
||||
impl OciImageFetcher {
|
||||
pub fn new(
|
||||
seed: Option<PathBuf>,
|
||||
storage: PathBuf,
|
||||
platform: OciRegistryPlatform,
|
||||
) -> OciImageDownloader {
|
||||
OciImageDownloader {
|
||||
platform: OciPlatform,
|
||||
progress: OciBoundProgress,
|
||||
) -> OciImageFetcher {
|
||||
OciImageFetcher {
|
||||
seed,
|
||||
storage,
|
||||
platform,
|
||||
progress,
|
||||
}
|
||||
}
|
||||
|
||||
@ -208,9 +210,17 @@ impl OciImageDownloader {
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn download(&self, image: OciResolvedImage) -> Result<OciLocalImage> {
|
||||
pub async fn download(
|
||||
&self,
|
||||
image: OciResolvedImage,
|
||||
layer_dir: &Path,
|
||||
) -> Result<OciLocalImage> {
|
||||
let config: ImageConfiguration;
|
||||
|
||||
self.progress
|
||||
.update(|progress| {
|
||||
progress.phase = OciProgressPhase::ConfigAcquire;
|
||||
})
|
||||
.await;
|
||||
let mut client = OciRegistryClient::new(image.name.registry_url()?, self.platform.clone())?;
|
||||
if let Some(seeded) = self
|
||||
.load_seed_json_blob::<ImageConfiguration>(image.manifest.config())
|
||||
@ -223,9 +233,31 @@ impl OciImageDownloader {
|
||||
.await?;
|
||||
config = serde_json::from_slice(&config_bytes)?;
|
||||
}
|
||||
self.progress
|
||||
.update(|progress| {
|
||||
progress.phase = OciProgressPhase::LayerAcquire;
|
||||
|
||||
for layer in image.manifest.layers() {
|
||||
progress.add_layer(layer.digest(), layer.size() as usize);
|
||||
}
|
||||
})
|
||||
.await;
|
||||
let mut layers = Vec::new();
|
||||
for layer in image.manifest.layers() {
|
||||
layers.push(self.acquire_layer(&image.name, layer, &mut client).await?);
|
||||
self.progress
|
||||
.update(|progress| {
|
||||
progress.downloading_layer(layer.digest(), 0, layer.size() as usize);
|
||||
})
|
||||
.await;
|
||||
layers.push(
|
||||
self.acquire_layer(&image.name, layer, layer_dir, &mut client)
|
||||
.await?,
|
||||
);
|
||||
self.progress
|
||||
.update(|progress| {
|
||||
progress.downloaded_layer(layer.digest());
|
||||
})
|
||||
.await;
|
||||
}
|
||||
Ok(OciLocalImage {
|
||||
image,
|
||||
@ -238,6 +270,7 @@ impl OciImageDownloader {
|
||||
&self,
|
||||
image: &ImageName,
|
||||
layer: &Descriptor,
|
||||
layer_dir: &Path,
|
||||
client: &mut OciRegistryClient,
|
||||
) -> Result<OciImageLayer> {
|
||||
debug!(
|
||||
@ -245,13 +278,15 @@ impl OciImageDownloader {
|
||||
layer.digest(),
|
||||
layer.size()
|
||||
);
|
||||
let mut layer_path = self.storage.clone();
|
||||
let mut layer_path = layer_dir.to_path_buf();
|
||||
layer_path.push(format!("{}.layer", layer.digest()));
|
||||
|
||||
let seeded = self.extract_seed_blob(layer, &layer_path).await?;
|
||||
if !seeded {
|
||||
let file = File::create(&layer_path).await?;
|
||||
let size = client.write_blob_to_file(&image.name, layer, file).await?;
|
||||
let size = client
|
||||
.write_blob_to_file(&image.name, layer, file, Some(self.progress.clone()))
|
||||
.await?;
|
||||
if layer.size() as u64 != size {
|
||||
return Err(anyhow!(
|
||||
"downloaded layer size differs from size in manifest",
|
||||
|
@@ -1,5 +1,7 @@
pub mod cache;
pub mod compiler;
pub mod assemble;
pub mod fetch;
pub mod name;
pub mod packer;
pub mod progress;
pub mod registry;
pub mod vfs;
201 crates/oci/src/packer/backend.rs Normal file
@@ -0,0 +1,201 @@
|
||||
use std::{path::Path, process::Stdio, sync::Arc};
|
||||
|
||||
use super::OciPackedFormat;
|
||||
use crate::{
|
||||
progress::{OciBoundProgress, OciProgressPhase},
|
||||
vfs::VfsTree,
|
||||
};
|
||||
use anyhow::{anyhow, Result};
|
||||
use log::warn;
|
||||
use tokio::{pin, process::Command, select};
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum OciPackerBackendType {
|
||||
MkSquashfs,
|
||||
MkfsErofs,
|
||||
}
|
||||
|
||||
impl OciPackerBackendType {
|
||||
pub fn format(&self) -> OciPackedFormat {
|
||||
match self {
|
||||
OciPackerBackendType::MkSquashfs => OciPackedFormat::Squashfs,
|
||||
OciPackerBackendType::MkfsErofs => OciPackedFormat::Erofs,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create(&self) -> Box<dyn OciPackerBackend> {
|
||||
match self {
|
||||
OciPackerBackendType::MkSquashfs => {
|
||||
Box::new(OciPackerMkSquashfs {}) as Box<dyn OciPackerBackend>
|
||||
}
|
||||
OciPackerBackendType::MkfsErofs => {
|
||||
Box::new(OciPackerMkfsErofs {}) as Box<dyn OciPackerBackend>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait OciPackerBackend: Send + Sync {
|
||||
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()>;
|
||||
}
|
||||
|
||||
pub struct OciPackerMkSquashfs {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl OciPackerBackend for OciPackerMkSquashfs {
|
||||
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, file: &Path) -> Result<()> {
|
||||
progress
|
||||
.update(|progress| {
|
||||
progress.phase = OciProgressPhase::Packing;
|
||||
progress.total = 1;
|
||||
progress.value = 0;
|
||||
})
|
||||
.await;
|
||||
|
||||
let mut child = Command::new("mksquashfs")
|
||||
.arg("-")
|
||||
.arg(file)
|
||||
.arg("-comp")
|
||||
.arg("gzip")
|
||||
.arg("-tar")
|
||||
.stdin(Stdio::piped())
|
||||
.stderr(Stdio::null())
|
||||
.stdout(Stdio::null())
|
||||
.spawn()?;
|
||||
let stdin = child
|
||||
.stdin
|
||||
.take()
|
||||
.ok_or(anyhow!("unable to acquire stdin stream"))?;
|
||||
let mut writer = Some(tokio::task::spawn(async move {
|
||||
if let Err(error) = vfs.write_to_tar(stdin).await {
|
||||
warn!("failed to write tar: {}", error);
|
||||
return Err(error);
|
||||
}
|
||||
Ok(())
|
||||
}));
|
||||
let wait = child.wait();
|
||||
pin!(wait);
|
||||
let status_result = loop {
|
||||
if let Some(inner) = writer.as_mut() {
|
||||
select! {
|
||||
x = inner => {
|
||||
writer = None;
|
||||
match x {
|
||||
Ok(_) => {},
|
||||
Err(error) => {
|
||||
return Err(error.into());
|
||||
}
|
||||
}
|
||||
},
|
||||
status = &mut wait => {
|
||||
break status;
|
||||
}
|
||||
};
|
||||
} else {
|
||||
select! {
|
||||
status = &mut wait => {
|
||||
break status;
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
if let Some(writer) = writer {
|
||||
writer.await??;
|
||||
}
|
||||
let status = status_result?;
|
||||
if !status.success() {
|
||||
Err(anyhow!(
|
||||
"mksquashfs failed with exit code: {}",
|
||||
status.code().unwrap()
|
||||
))
|
||||
} else {
|
||||
progress
|
||||
.update(|progress| {
|
||||
progress.phase = OciProgressPhase::Packing;
|
||||
progress.total = 1;
|
||||
progress.value = 1;
|
||||
})
|
||||
.await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OciPackerMkfsErofs {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl OciPackerBackend for OciPackerMkfsErofs {
|
||||
async fn pack(&self, progress: OciBoundProgress, vfs: Arc<VfsTree>, path: &Path) -> Result<()> {
|
||||
progress
|
||||
.update(|progress| {
|
||||
progress.phase = OciProgressPhase::Packing;
|
||||
progress.total = 1;
|
||||
progress.value = 0;
|
||||
})
|
||||
.await;
|
||||
|
||||
let mut child = Command::new("mkfs.erofs")
|
||||
.arg("-L")
|
||||
.arg("root")
|
||||
.arg("--tar=-")
|
||||
.arg(path)
|
||||
.stdin(Stdio::piped())
|
||||
.stderr(Stdio::null())
|
||||
.stdout(Stdio::null())
|
||||
.spawn()?;
|
||||
let stdin = child
|
||||
.stdin
|
||||
.take()
|
||||
.ok_or(anyhow!("unable to acquire stdin stream"))?;
|
||||
let mut writer = Some(tokio::task::spawn(
|
||||
async move { vfs.write_to_tar(stdin).await },
|
||||
));
|
||||
let wait = child.wait();
|
||||
pin!(wait);
|
||||
let status_result = loop {
|
||||
if let Some(inner) = writer.as_mut() {
|
||||
select! {
|
||||
x = inner => {
|
||||
match x {
|
||||
Ok(_) => {
|
||||
writer = None;
|
||||
},
|
||||
Err(error) => {
|
||||
return Err(error.into());
|
||||
}
|
||||
}
|
||||
},
|
||||
status = &mut wait => {
|
||||
break status;
|
||||
}
|
||||
};
|
||||
} else {
|
||||
select! {
|
||||
status = &mut wait => {
|
||||
break status;
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
if let Some(writer) = writer {
|
||||
writer.await??;
|
||||
}
|
||||
let status = status_result?;
|
||||
if !status.success() {
|
||||
Err(anyhow!(
|
||||
"mkfs.erofs failed with exit code: {}",
|
||||
status.code().unwrap()
|
||||
))
|
||||
} else {
|
||||
progress
|
||||
.update(|progress| {
|
||||
progress.phase = OciProgressPhase::Packing;
|
||||
progress.total = 1;
|
||||
progress.value = 1;
|
||||
})
|
||||
.await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
84 crates/oci/src/packer/cache.rs Normal file
@@ -0,0 +1,84 @@
|
||||
use crate::packer::{OciImagePacked, OciPackedFormat};
|
||||
|
||||
use anyhow::Result;
|
||||
use log::debug;
|
||||
use oci_spec::image::{ImageConfiguration, ImageManifest};
|
||||
use std::path::{Path, PathBuf};
|
||||
use tokio::fs;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OciPackerCache {
|
||||
cache_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl OciPackerCache {
|
||||
pub fn new(cache_dir: &Path) -> Result<OciPackerCache> {
|
||||
Ok(OciPackerCache {
|
||||
cache_dir: cache_dir.to_path_buf(),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn recall(
|
||||
&self,
|
||||
digest: &str,
|
||||
format: OciPackedFormat,
|
||||
) -> Result<Option<OciImagePacked>> {
|
||||
let mut fs_path = self.cache_dir.clone();
|
||||
let mut config_path = self.cache_dir.clone();
|
||||
let mut manifest_path = self.cache_dir.clone();
|
||||
fs_path.push(format!("{}.{}", digest, format.extension()));
|
||||
manifest_path.push(format!("{}.manifest.json", digest));
|
||||
config_path.push(format!("{}.config.json", digest));
|
||||
Ok(
|
||||
if fs_path.exists() && manifest_path.exists() && config_path.exists() {
|
||||
let image_metadata = fs::metadata(&fs_path).await?;
|
||||
let manifest_metadata = fs::metadata(&manifest_path).await?;
|
||||
let config_metadata = fs::metadata(&config_path).await?;
|
||||
if image_metadata.is_file()
|
||||
&& manifest_metadata.is_file()
|
||||
&& config_metadata.is_file()
|
||||
{
|
||||
let manifest_text = fs::read_to_string(&manifest_path).await?;
|
||||
let manifest: ImageManifest = serde_json::from_str(&manifest_text)?;
|
||||
let config_text = fs::read_to_string(&config_path).await?;
|
||||
let config: ImageConfiguration = serde_json::from_str(&config_text)?;
|
||||
debug!("cache hit digest={}", digest);
|
||||
Some(OciImagePacked::new(
|
||||
digest.to_string(),
|
||||
fs_path.clone(),
|
||||
format,
|
||||
config,
|
||||
manifest,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
debug!("cache miss digest={}", digest);
|
||||
None
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn store(&self, packed: OciImagePacked) -> Result<OciImagePacked> {
|
||||
debug!("cache store digest={}", packed.digest);
|
||||
let mut fs_path = self.cache_dir.clone();
|
||||
let mut manifest_path = self.cache_dir.clone();
|
||||
let mut config_path = self.cache_dir.clone();
|
||||
fs_path.push(format!("{}.{}", packed.digest, packed.format.extension()));
|
||||
manifest_path.push(format!("{}.manifest.json", packed.digest));
|
||||
config_path.push(format!("{}.config.json", packed.digest));
|
||||
fs::copy(&packed.path, &fs_path).await?;
|
||||
let manifest_text = serde_json::to_string_pretty(&packed.manifest)?;
|
||||
fs::write(&manifest_path, manifest_text).await?;
|
||||
let config_text = serde_json::to_string_pretty(&packed.config)?;
|
||||
fs::write(&config_path, config_text).await?;
|
||||
Ok(OciImagePacked::new(
|
||||
packed.digest,
|
||||
fs_path.clone(),
|
||||
packed.format,
|
||||
packed.config,
|
||||
packed.manifest,
|
||||
))
|
||||
}
|
||||
}
|
58 crates/oci/src/packer/mod.rs Normal file
@@ -0,0 +1,58 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use self::backend::OciPackerBackendType;
|
||||
use oci_spec::image::{ImageConfiguration, ImageManifest};
|
||||
|
||||
pub mod backend;
|
||||
pub mod cache;
|
||||
pub mod service;
|
||||
|
||||
#[derive(Debug, Default, Clone, Copy)]
|
||||
pub enum OciPackedFormat {
|
||||
#[default]
|
||||
Squashfs,
|
||||
Erofs,
|
||||
}
|
||||
|
||||
impl OciPackedFormat {
|
||||
pub fn extension(&self) -> &str {
|
||||
match self {
|
||||
OciPackedFormat::Squashfs => "squashfs",
|
||||
OciPackedFormat::Erofs => "erofs",
|
||||
}
|
||||
}
|
||||
|
||||
pub fn backend(&self) -> OciPackerBackendType {
|
||||
match self {
|
||||
OciPackedFormat::Squashfs => OciPackerBackendType::MkSquashfs,
|
||||
OciPackedFormat::Erofs => OciPackerBackendType::MkfsErofs,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OciImagePacked {
|
||||
pub digest: String,
|
||||
pub path: PathBuf,
|
||||
pub format: OciPackedFormat,
|
||||
pub config: ImageConfiguration,
|
||||
pub manifest: ImageManifest,
|
||||
}
|
||||
|
||||
impl OciImagePacked {
|
||||
pub fn new(
|
||||
digest: String,
|
||||
path: PathBuf,
|
||||
format: OciPackedFormat,
|
||||
config: ImageConfiguration,
|
||||
manifest: ImageManifest,
|
||||
) -> OciImagePacked {
|
||||
OciImagePacked {
|
||||
digest,
|
||||
path,
|
||||
format,
|
||||
config,
|
||||
manifest,
|
||||
}
|
||||
}
|
||||
}
|
81 crates/oci/src/packer/service.rs Normal file
@@ -0,0 +1,81 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
|
||||
use crate::{
|
||||
assemble::OciImageAssembler,
|
||||
fetch::OciImageFetcher,
|
||||
name::ImageName,
|
||||
progress::{OciBoundProgress, OciProgress, OciProgressContext},
|
||||
registry::OciPlatform,
|
||||
};
|
||||
|
||||
use super::{cache::OciPackerCache, OciImagePacked, OciPackedFormat};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OciPackerService {
|
||||
seed: Option<PathBuf>,
|
||||
platform: OciPlatform,
|
||||
cache: OciPackerCache,
|
||||
}
|
||||
|
||||
impl OciPackerService {
|
||||
pub fn new(
|
||||
seed: Option<PathBuf>,
|
||||
cache_dir: &Path,
|
||||
platform: OciPlatform,
|
||||
) -> Result<OciPackerService> {
|
||||
Ok(OciPackerService {
|
||||
seed,
|
||||
cache: OciPackerCache::new(cache_dir)?,
|
||||
platform,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn recall(
|
||||
&self,
|
||||
digest: &str,
|
||||
format: OciPackedFormat,
|
||||
) -> Result<Option<OciImagePacked>> {
|
||||
self.cache.recall(digest, format).await
|
||||
}
|
||||
|
||||
pub async fn request(
|
||||
&self,
|
||||
name: ImageName,
|
||||
format: OciPackedFormat,
|
||||
progress_context: OciProgressContext,
|
||||
) -> Result<OciImagePacked> {
|
||||
let progress = OciProgress::new();
|
||||
let progress = OciBoundProgress::new(progress_context.clone(), progress);
|
||||
let fetcher =
|
||||
OciImageFetcher::new(self.seed.clone(), self.platform.clone(), progress.clone());
|
||||
let resolved = fetcher.resolve(name).await?;
|
||||
if let Some(cached) = self.cache.recall(&resolved.digest, format).await? {
|
||||
return Ok(cached);
|
||||
}
|
||||
let assembler =
|
||||
OciImageAssembler::new(fetcher, resolved, progress.clone(), None, None).await?;
|
||||
let assembled = assembler.assemble().await?;
|
||||
let mut file = assembled
|
||||
.tmp_dir
|
||||
.clone()
|
||||
.ok_or(anyhow!("tmp_dir was missing when packing image"))?;
|
||||
file.push("image.pack");
|
||||
let target = file.clone();
|
||||
let packer = format.backend().create();
|
||||
packer
|
||||
.pack(progress, assembled.vfs.clone(), &target)
|
||||
.await?;
|
||||
|
||||
let packed = OciImagePacked::new(
|
||||
assembled.digest.clone(),
|
||||
file,
|
||||
format,
|
||||
assembled.config.clone(),
|
||||
assembled.manifest.clone(),
|
||||
);
|
||||
let packed = self.cache.store(packed).await?;
|
||||
Ok(packed)
|
||||
}
|
||||
}
|
140 crates/oci/src/progress.rs Normal file
@@ -0,0 +1,140 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use indexmap::IndexMap;
|
||||
use tokio::sync::{mpsc::Sender, Mutex};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct OciProgress {
|
||||
pub phase: OciProgressPhase,
|
||||
pub layers: IndexMap<String, OciProgressLayer>,
|
||||
pub value: u64,
|
||||
pub total: u64,
|
||||
}
|
||||
|
||||
impl Default for OciProgress {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl OciProgress {
|
||||
pub fn new() -> Self {
|
||||
OciProgress {
|
||||
phase: OciProgressPhase::Resolving,
|
||||
layers: IndexMap::new(),
|
||||
value: 0,
|
||||
total: 1,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_layer(&mut self, id: &str, size: usize) {
|
||||
self.layers.insert(
|
||||
id.to_string(),
|
||||
OciProgressLayer {
|
||||
id: id.to_string(),
|
||||
phase: OciProgressLayerPhase::Waiting,
|
||||
value: 0,
|
||||
total: size as u64,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
pub fn downloading_layer(&mut self, id: &str, downloaded: usize, total: usize) {
|
||||
if let Some(entry) = self.layers.get_mut(id) {
|
||||
entry.phase = OciProgressLayerPhase::Downloading;
|
||||
entry.value = downloaded as u64;
|
||||
entry.total = total as u64;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn downloaded_layer(&mut self, id: &str) {
|
||||
if let Some(entry) = self.layers.get_mut(id) {
|
||||
entry.phase = OciProgressLayerPhase::Downloaded;
|
||||
entry.value = entry.total;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn extracting_layer(&mut self, id: &str, extracted: usize, total: usize) {
|
||||
if let Some(entry) = self.layers.get_mut(id) {
|
||||
entry.phase = OciProgressLayerPhase::Extracting;
|
||||
entry.value = extracted as u64;
|
||||
entry.total = total as u64;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn extracted_layer(&mut self, id: &str) {
|
||||
if let Some(entry) = self.layers.get_mut(id) {
|
||||
entry.phase = OciProgressLayerPhase::Extracted;
|
||||
entry.value = entry.total;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum OciProgressPhase {
|
||||
Resolving,
|
||||
Resolved,
|
||||
ConfigAcquire,
|
||||
LayerAcquire,
|
||||
Packing,
|
||||
Complete,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct OciProgressLayer {
|
||||
pub id: String,
|
||||
pub phase: OciProgressLayerPhase,
|
||||
pub value: u64,
|
||||
pub total: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum OciProgressLayerPhase {
|
||||
Waiting,
|
||||
Downloading,
|
||||
Downloaded,
|
||||
Extracting,
|
||||
Extracted,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OciProgressContext {
|
||||
sender: Sender<OciProgress>,
|
||||
}
|
||||
|
||||
impl OciProgressContext {
|
||||
pub fn new(sender: Sender<OciProgress>) -> OciProgressContext {
|
||||
OciProgressContext { sender }
|
||||
}
|
||||
|
||||
pub fn update(&self, progress: &OciProgress) {
|
||||
let _ = self.sender.try_send(progress.clone());
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OciBoundProgress {
|
||||
context: OciProgressContext,
|
||||
instance: Arc<Mutex<OciProgress>>,
|
||||
}
|
||||
|
||||
impl OciBoundProgress {
|
||||
pub fn new(context: OciProgressContext, progress: OciProgress) -> OciBoundProgress {
|
||||
OciBoundProgress {
|
||||
context,
|
||||
instance: Arc::new(Mutex::new(progress)),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn update(&self, function: impl FnOnce(&mut OciProgress)) {
|
||||
let mut progress = self.instance.lock().await;
|
||||
function(&mut progress);
|
||||
self.context.update(&progress);
|
||||
}
|
||||
|
||||
pub fn update_blocking(&self, function: impl FnOnce(&mut OciProgress)) {
|
||||
let mut progress = self.instance.blocking_lock();
|
||||
function(&mut progress);
|
||||
self.context.update(&progress);
|
||||
}
|
||||
}
|
@ -7,26 +7,28 @@ use reqwest::{Client, RequestBuilder, Response, StatusCode};
|
||||
use tokio::{fs::File, io::AsyncWriteExt};
|
||||
use url::Url;
|
||||
|
||||
use crate::progress::OciBoundProgress;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct OciRegistryPlatform {
|
||||
pub struct OciPlatform {
|
||||
pub os: Os,
|
||||
pub arch: Arch,
|
||||
}
|
||||
|
||||
impl OciRegistryPlatform {
|
||||
impl OciPlatform {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
const CURRENT_ARCH: Arch = Arch::Amd64;
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
const CURRENT_ARCH: Arch = Arch::ARM64;
|
||||
|
||||
pub fn new(os: Os, arch: Arch) -> OciRegistryPlatform {
|
||||
OciRegistryPlatform { os, arch }
|
||||
pub fn new(os: Os, arch: Arch) -> OciPlatform {
|
||||
OciPlatform { os, arch }
|
||||
}
|
||||
|
||||
pub fn current() -> OciRegistryPlatform {
|
||||
OciRegistryPlatform {
|
||||
pub fn current() -> OciPlatform {
|
||||
OciPlatform {
|
||||
os: Os::Linux,
|
||||
arch: OciRegistryPlatform::CURRENT_ARCH,
|
||||
arch: OciPlatform::CURRENT_ARCH,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -34,12 +36,12 @@ impl OciRegistryPlatform {
|
||||
pub struct OciRegistryClient {
|
||||
agent: Client,
|
||||
url: Url,
|
||||
platform: OciRegistryPlatform,
|
||||
platform: OciPlatform,
|
||||
token: Option<String>,
|
||||
}
|
||||
|
||||
impl OciRegistryClient {
|
||||
pub fn new(url: Url, platform: OciRegistryPlatform) -> Result<OciRegistryClient> {
|
||||
pub fn new(url: Url, platform: OciPlatform) -> Result<OciRegistryClient> {
|
||||
Ok(OciRegistryClient {
|
||||
agent: Client::new(),
|
||||
url,
|
||||
@ -138,6 +140,7 @@ impl OciRegistryClient {
|
||||
name: N,
|
||||
descriptor: &Descriptor,
|
||||
mut dest: File,
|
||||
progress: Option<OciBoundProgress>,
|
||||
) -> Result<u64> {
|
||||
let url = self.url.join(&format!(
|
||||
"/v2/{}/blobs/{}",
|
||||
@ -146,9 +149,25 @@ impl OciRegistryClient {
|
||||
))?;
|
||||
let mut response = self.call(self.agent.get(url.as_str())).await?;
|
||||
let mut size: u64 = 0;
|
||||
let mut last_progress_size: u64 = 0;
|
||||
while let Some(chunk) = response.chunk().await? {
|
||||
dest.write_all(&chunk).await?;
|
||||
size += chunk.len() as u64;
|
||||
|
||||
if (size - last_progress_size) > (5 * 1024 * 1024) {
|
||||
last_progress_size = size;
|
||||
if let Some(ref progress) = progress {
|
||||
progress
|
||||
.update(|progress| {
|
||||
progress.downloading_layer(
|
||||
descriptor.digest(),
|
||||
size as usize,
|
||||
descriptor.size() as usize,
|
||||
);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(size)
|
||||
}
|
||||
|
261 crates/oci/src/vfs.rs Normal file
@@ -0,0 +1,261 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use tokio::{
|
||||
fs::File,
|
||||
io::{AsyncRead, AsyncWrite, AsyncWriteExt},
|
||||
};
|
||||
use tokio_tar::{Builder, Entry, EntryType, Header};
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub enum VfsNodeType {
|
||||
Directory,
|
||||
RegularFile,
|
||||
Symlink,
|
||||
Hardlink,
|
||||
Fifo,
|
||||
CharDevice,
|
||||
BlockDevice,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct VfsNode {
|
||||
pub name: String,
|
||||
pub size: u64,
|
||||
pub children: Vec<VfsNode>,
|
||||
pub typ: VfsNodeType,
|
||||
pub uid: u64,
|
||||
pub gid: u64,
|
||||
pub link_name: Option<String>,
|
||||
pub mode: u32,
|
||||
pub mtime: u64,
|
||||
pub dev_major: Option<u32>,
|
||||
pub dev_minor: Option<u32>,
|
||||
pub disk_path: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl VfsNode {
|
||||
pub fn from<X: AsyncRead + Unpin>(entry: &Entry<X>) -> Result<VfsNode> {
|
||||
let header = entry.header();
|
||||
let name = entry
|
||||
.path()?
|
||||
.file_name()
|
||||
.ok_or(anyhow!("unable to get file name for entry"))?
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
let typ = header.entry_type();
|
||||
let vtype = if typ.is_symlink() {
|
||||
VfsNodeType::Symlink
|
||||
} else if typ.is_hard_link() {
|
||||
VfsNodeType::Hardlink
|
||||
} else if typ.is_dir() {
|
||||
VfsNodeType::Directory
|
||||
} else if typ.is_fifo() {
|
||||
VfsNodeType::Fifo
|
||||
} else if typ.is_block_special() {
|
||||
VfsNodeType::BlockDevice
|
||||
} else if typ.is_character_special() {
|
||||
VfsNodeType::CharDevice
|
||||
} else if typ.is_file() {
|
||||
VfsNodeType::RegularFile
|
||||
} else {
|
||||
return Err(anyhow!("unable to determine vfs type for entry"));
|
||||
};
|
||||
|
||||
Ok(VfsNode {
|
||||
name,
|
||||
size: header.size()?,
|
||||
children: vec![],
|
||||
typ: vtype,
|
||||
uid: header.uid()?,
|
||||
gid: header.gid()?,
|
||||
link_name: header.link_name()?.map(|x| x.to_string_lossy().to_string()),
|
||||
mode: header.mode()?,
|
||||
mtime: header.mtime()?,
|
||||
dev_major: header.device_major()?,
|
||||
dev_minor: header.device_minor()?,
|
||||
disk_path: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn lookup(&self, path: &Path) -> Option<&VfsNode> {
|
||||
let mut node = self;
|
||||
for part in path {
|
||||
node = node
|
||||
.children
|
||||
.iter()
|
||||
.find(|child| child.name == part.to_string_lossy())?;
|
||||
}
|
||||
Some(node)
|
||||
}
|
||||
|
||||
pub fn lookup_mut(&mut self, path: &Path) -> Option<&mut VfsNode> {
|
||||
let mut node = self;
|
||||
for part in path {
|
||||
node = node
|
||||
.children
|
||||
.iter_mut()
|
||||
.find(|child| child.name == part.to_string_lossy())?;
|
||||
}
|
||||
Some(node)
|
||||
}
|
||||
|
||||
pub fn remove(&mut self, path: &Path) -> Option<(&mut VfsNode, VfsNode)> {
|
||||
let parent = path.parent()?;
|
||||
let node = self.lookup_mut(parent)?;
|
||||
let file_name = path.file_name()?;
|
||||
let file_name = file_name.to_string_lossy();
|
||||
let position = node
|
||||
.children
|
||||
.iter()
|
||||
.position(|child| file_name == child.name)?;
|
||||
let removed = node.children.remove(position);
|
||||
Some((node, removed))
|
||||
}
|
||||
|
||||
pub fn create_tar_header(&self) -> Result<Header> {
|
||||
let mut header = Header::new_ustar();
|
||||
header.set_entry_type(match self.typ {
|
||||
VfsNodeType::Directory => EntryType::Directory,
|
||||
VfsNodeType::CharDevice => EntryType::Char,
|
||||
VfsNodeType::BlockDevice => EntryType::Block,
|
||||
VfsNodeType::Fifo => EntryType::Fifo,
|
||||
VfsNodeType::Hardlink => EntryType::Link,
|
||||
VfsNodeType::Symlink => EntryType::Symlink,
|
||||
VfsNodeType::RegularFile => EntryType::Regular,
|
||||
});
|
||||
header.set_uid(self.uid);
|
||||
header.set_gid(self.gid);
|
||||
|
||||
if let Some(device_major) = self.dev_major {
|
||||
header.set_device_major(device_major)?;
|
||||
}
|
||||
|
||||
if let Some(device_minor) = self.dev_minor {
|
||||
header.set_device_minor(device_minor)?;
|
||||
}
|
||||
header.set_mtime(self.mtime);
|
||||
header.set_mode(self.mode);
|
||||
|
||||
if let Some(link_name) = self.link_name.as_ref() {
|
||||
header.set_link_name(&PathBuf::from(link_name))?;
|
||||
}
|
||||
header.set_size(self.size);
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
pub async fn write_to_tar<W: AsyncWrite + Unpin + Send>(
|
||||
&self,
|
||||
path: &Path,
|
||||
builder: &mut Builder<W>,
|
||||
) -> Result<()> {
|
||||
let mut header = self.create_tar_header()?;
|
||||
header.set_path(path)?;
|
||||
header.set_cksum();
|
||||
if let Some(disk_path) = self.disk_path.as_ref() {
|
||||
builder
|
||||
.append(&header, File::open(disk_path).await?)
|
||||
.await?;
|
||||
} else {
|
||||
builder.append(&header, &[] as &[u8]).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct VfsTree {
|
||||
pub root: VfsNode,
|
||||
}
|
||||
|
||||
impl Default for VfsTree {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl VfsTree {
|
||||
pub fn new() -> VfsTree {
|
||||
VfsTree {
|
||||
root: VfsNode {
|
||||
name: "".to_string(),
|
||||
size: 0,
|
||||
children: vec![],
|
||||
typ: VfsNodeType::Directory,
|
||||
uid: 0,
|
||||
gid: 0,
|
||||
link_name: None,
|
||||
mode: 0,
|
||||
mtime: 0,
|
||||
dev_major: None,
|
||||
dev_minor: None,
|
||||
disk_path: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert_tar_entry<X: AsyncRead + Unpin>(&mut self, entry: &Entry<X>) -> Result<()> {
|
||||
let mut meta = VfsNode::from(entry)?;
|
||||
let path = entry.path()?.to_path_buf();
|
||||
let parent = if let Some(parent) = path.parent() {
|
||||
self.root.lookup_mut(parent)
|
||||
} else {
|
||||
Some(&mut self.root)
|
||||
};
|
||||
|
||||
let Some(parent) = parent else {
|
||||
return Err(anyhow!("unable to find parent of entry"));
|
||||
};
|
||||
|
||||
let position = parent
|
||||
.children
|
||||
.iter()
|
||||
.position(|child| meta.name == child.name);
|
||||
|
||||
if let Some(position) = position {
|
||||
let old = parent.children.remove(position);
|
||||
if meta.typ == VfsNodeType::Directory {
|
||||
meta.children = old.children;
|
||||
}
|
||||
}
|
||||
parent.children.push(meta);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_disk_path(&mut self, path: &Path, disk_path: &Path) -> Result<()> {
|
||||
let Some(node) = self.root.lookup_mut(path) else {
|
||||
return Err(anyhow!(
|
||||
"unable to find node {:?} to set disk path to",
|
||||
path
|
||||
));
|
||||
};
|
||||
node.disk_path = Some(disk_path.to_path_buf());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn write_to_tar<W: AsyncWrite + Unpin + Send + 'static>(
|
||||
&self,
|
||||
write: W,
|
||||
) -> Result<()> {
|
||||
let mut builder = Builder::new(write);
|
||||
let mut queue = vec![(PathBuf::from(""), &self.root)];
|
||||
|
||||
while !queue.is_empty() {
|
||||
let (mut path, node) = queue.remove(0);
|
||||
if !node.name.is_empty() {
|
||||
path.push(&node.name);
|
||||
}
|
||||
if path.components().count() != 0 {
|
||||
node.write_to_tar(&path, &mut builder).await?;
|
||||
}
|
||||
for child in &node.children {
|
||||
queue.push((path.clone(), child));
|
||||
}
|
||||
}
|
||||
|
||||
let mut write = builder.into_inner().await?;
|
||||
write.flush().await?;
|
||||
drop(write);
|
||||
Ok(())
|
||||
}
|
||||
}
|
@@ -2,7 +2,7 @@
name = "krata-runtime"
description = "Runtime for running guests on the krata hypervisor."
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
@@ -12,18 +12,18 @@ resolver = "2"
anyhow = { workspace = true }
backhand = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.3" }
krata = { path = "../krata", version = "^0.0.9" }
krata-advmac = { workspace = true }
krata-oci = { path = "../oci", version = "^0.0.3" }
krata-oci = { path = "../oci", version = "^0.0.9" }
log = { workspace = true }
loopdev-3 = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.3" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.3" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.3" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.3" }
krata-xenclient = { path = "../xen/xenclient", version = "^0.0.9" }
krata-xenevtchn = { path = "../xen/xenevtchn", version = "^0.0.9" }
krata-xengnt = { path = "../xen/xengnt", version = "^0.0.9" }
krata-xenstore = { path = "../xen/xenstore", version = "^0.0.9" }

[lib]
name = "kratart"
@@ -31,10 +31,6 @@ name = "kratart"
[dev-dependencies]
env_logger = { workspace = true }

[[example]]
name = "kratart-squashify"
path = "examples/squashify.rs"

[[example]]
name = "kratart-channel"
path = "examples/channel.rs"
@@ -6,7 +6,7 @@ use kratart::channel::ChannelService;
async fn main() -> Result<()> {
    env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();

    let (service, mut receiver) = ChannelService::new("krata-channel".to_string()).await?;
    let (service, _, mut receiver) = ChannelService::new("krata-channel".to_string(), None).await?;
    let task = service.launch().await?;

    loop {
@@ -1,29 +0,0 @@
use std::{env::args, path::PathBuf};

use anyhow::Result;
use env_logger::Env;
use krataoci::{cache::ImageCache, compiler::ImageCompiler, name::ImageName};
use tokio::fs;

#[tokio::main]
async fn main() -> Result<()> {
    env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();

    let image = ImageName::parse(&args().nth(1).unwrap())?;
    let seed = args().nth(2).map(PathBuf::from);

    let cache_dir = PathBuf::from("krata-cache");
    if !cache_dir.exists() {
        fs::create_dir(&cache_dir).await?;
    }

    let cache = ImageCache::new(&cache_dir)?;
    let compiler = ImageCompiler::new(&cache, seed)?;
    let info = compiler.compile(&image).await?;
    println!(
        "generated squashfs of {} to {}",
        image,
        info.image_squashfs.to_string_lossy()
    );
    Ok(())
}
@@ -1,17 +1,25 @@
use std::{sync::Arc, time::Duration};

use anyhow::{anyhow, Result};
use log::debug;
use loopdev::{LoopControl, LoopDevice};
use tokio::time::sleep;
use xenclient::BlockDeviceRef;

#[derive(Clone)]
pub struct AutoLoop {
    control: LoopControl,
    control: Arc<LoopControl>,
}

impl AutoLoop {
    pub fn new(control: LoopControl) -> AutoLoop {
        AutoLoop { control }
        AutoLoop {
            control: Arc::new(control),
        }
    }

    pub fn loopify(&self, file: &str) -> Result<BlockDeviceRef> {
        debug!("creating loop for file {}", file);
        let device = self.control.next_free()?;
        device.with().read_only(true).attach(file)?;
        let path = device
@@ -25,9 +33,10 @@ impl AutoLoop {
        Ok(BlockDeviceRef { path, major, minor })
    }

    pub fn unloop(&self, device: &str) -> Result<()> {
    pub async fn unloop(&self, device: &str) -> Result<()> {
        let device = LoopDevice::open(device)?;
        device.detach()?;
        sleep(Duration::from_millis(200)).await;
        Ok(())
    }
}
@@ -1,7 +1,7 @@
use anyhow::Result;
use backhand::{FilesystemWriter, NodeHeader};
use krata::launchcfg::LaunchInfo;
use krataoci::compiler::ImageInfo;
use krataoci::packer::OciImagePacked;
use log::trace;
use std::fs;
use std::fs::File;
@@ -9,28 +9,24 @@ use std::path::PathBuf;
use uuid::Uuid;

pub struct ConfigBlock<'a> {
    pub image_info: &'a ImageInfo,
    pub image: &'a OciImagePacked,
    pub file: PathBuf,
    pub dir: PathBuf,
}

impl ConfigBlock<'_> {
    pub fn new<'a>(uuid: &Uuid, image_info: &'a ImageInfo) -> Result<ConfigBlock<'a>> {
    pub fn new<'a>(uuid: &Uuid, image: &'a OciImagePacked) -> Result<ConfigBlock<'a>> {
        let mut dir = std::env::temp_dir().clone();
        dir.push(format!("krata-cfg-{}", uuid));
        fs::create_dir_all(&dir)?;
        let mut file = dir.clone();
        file.push("config.squashfs");
        Ok(ConfigBlock {
            image_info,
            file,
            dir,
        })
        Ok(ConfigBlock { image, file, dir })
    }

    pub fn build(&self, launch_config: &LaunchInfo) -> Result<()> {
        trace!("build launch_config={:?}", launch_config);
        let manifest = self.image_info.config.to_string()?;
        let manifest = self.image.config.to_string()?;
        let launch = serde_json::to_string(launch_config)?;
        let mut writer = FilesystemWriter::default();
        writer.push_dir(
@@ -41,30 +41,40 @@ impl XenConsoleInterface {

pub struct ChannelService {
    typ: String,
    use_reserved_ref: Option<u64>,
    backends: HashMap<u32, ChannelBackend>,
    evtchn: EventChannel,
    store: XsdClient,
    gnttab: GrantTab,
    input_receiver: Receiver<(u32, Vec<u8>)>,
    pub input_sender: Sender<(u32, Vec<u8>)>,
    output_sender: Sender<(u32, Vec<u8>)>,
    output_sender: Sender<(u32, Option<Vec<u8>>)>,
}

impl ChannelService {
    pub async fn new(typ: String) -> Result<(ChannelService, Receiver<(u32, Vec<u8>)>)> {
    pub async fn new(
        typ: String,
        use_reserved_ref: Option<u64>,
    ) -> Result<(
        ChannelService,
        Sender<(u32, Vec<u8>)>,
        Receiver<(u32, Option<Vec<u8>>)>,
    )> {
        let (input_sender, input_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
        let (output_sender, output_receiver) = channel(GROUPED_CHANNEL_QUEUE_LEN);
        Ok((
            ChannelService {
                typ,
                use_reserved_ref,
                backends: HashMap::new(),
                evtchn: EventChannel::open().await?,
                store: XsdClient::open().await?,
                gnttab: GrantTab::open()?,
                input_sender,
                input_sender: input_sender.clone(),
                input_receiver,
                output_sender,
            },
            input_sender,
            output_receiver,
        ))
    }
@@ -148,6 +158,7 @@ impl ChannelService {
                self.evtchn.clone(),
                self.gnttab.clone(),
                self.output_sender.clone(),
                self.use_reserved_ref,
            )
            .await?;
            self.backends.insert(domid, backend);
@@ -192,12 +203,14 @@ pub struct ChannelBackend {
    pub domid: u32,
    pub id: u32,
    pub sender: Sender<Vec<u8>>,
    raw_sender: Sender<(u32, Option<Vec<u8>>)>,
    task: JoinHandle<()>,
}

impl Drop for ChannelBackend {
    fn drop(&mut self) {
        self.task.abort();
        let _ = self.raw_sender.try_send((self.domid, None));
        debug!(
            "destroyed channel backend for domain {} channel {}",
            self.domid, self.id
@@ -215,7 +228,8 @@ impl ChannelBackend {
        store: XsdClient,
        evtchn: EventChannel,
        gnttab: GrantTab,
        output_sender: Sender<(u32, Vec<u8>)>,
        output_sender: Sender<(u32, Option<Vec<u8>>)>,
        use_reserved_ref: Option<u64>,
    ) -> Result<ChannelBackend> {
        let processor = KrataChannelBackendProcessor {
            backend,
@@ -225,15 +239,19 @@ impl ChannelBackend {
            store,
            evtchn,
            gnttab,
            use_reserved_ref,
        };

        let (input_sender, input_receiver) = channel(SINGLE_CHANNEL_QUEUE_LEN);

        let task = processor.launch(output_sender, input_receiver).await?;
        let task = processor
            .launch(output_sender.clone(), input_receiver)
            .await?;
        Ok(ChannelBackend {
            domid,
            id,
            task,
            raw_sender: output_sender,
            sender: input_sender,
        })
    }
@@ -241,6 +259,7 @@ impl ChannelBackend {

#[derive(Clone)]
pub struct KrataChannelBackendProcessor {
    use_reserved_ref: Option<u64>,
    backend: String,
    frontend: String,
    id: u32,
@@ -290,7 +309,7 @@ impl KrataChannelBackendProcessor {

    async fn launch(
        &self,
        output_sender: Sender<(u32, Vec<u8>)>,
        output_sender: Sender<(u32, Option<Vec<u8>>)>,
        input_receiver: Receiver<Vec<u8>>,
    ) -> Result<JoinHandle<()>> {
        let owned = self.clone();
@@ -307,7 +326,7 @@ impl KrataChannelBackendProcessor {

    async fn processor(
        &self,
        sender: Sender<(u32, Vec<u8>)>,
        sender: Sender<(u32, Option<Vec<u8>>)>,
        mut receiver: Receiver<Vec<u8>>,
    ) -> Result<()> {
        self.init().await?;
@@ -332,7 +351,7 @@ impl KrataChannelBackendProcessor {
                .read_string(format!("{}/port", self.frontend))
                .await?;

            if (ring_ref.is_none() || port.is_none()) && tries < 10 {
            if (ring_ref.is_none() || port.is_none()) && tries < 40 {
                tries += 1;
                self.store
                    .write_string(format!("{}/state", self.backend), "4")
@@ -347,7 +366,7 @@ impl KrataChannelBackendProcessor {
                return Err(anyhow!("frontend did not give ring-ref and port"));
            }

            let Ok(ring_ref) = ring_ref.unwrap().parse::<u64>() else {
            let Ok(mut ring_ref) = ring_ref.unwrap().parse::<u64>() else {
                return Err(anyhow!("frontend gave invalid ring-ref"));
            };

@@ -355,6 +374,8 @@ impl KrataChannelBackendProcessor {
                return Err(anyhow!("frontend gave invalid port"));
            };

            ring_ref = self.use_reserved_ref.unwrap_or(ring_ref);

            break (ring_ref, port);
        }
    }
@@ -380,7 +401,7 @@ impl KrataChannelBackendProcessor {
        unsafe {
            let buffer = self.read_output_buffer(channel.local_port, &memory).await?;
            if !buffer.is_empty() {
                sender.send((self.domid, buffer)).await?;
                sender.send((self.domid, Some(buffer))).await?;
            }
        };

@@ -427,6 +448,10 @@ impl KrataChannelBackendProcessor {
                error!("channel for domid {} has an invalid input space of {}", self.domid, space);
            }
            let free = XenConsoleInterface::INPUT_SIZE.wrapping_sub(space);
            if free == 0 {
                sleep(Duration::from_micros(100)).await;
                continue;
            }
            let want = data.len().min(free);
            let buffer = &data[index..want];
            for b in buffer {
@@ -450,7 +475,7 @@ impl KrataChannelBackendProcessor {
        unsafe {
            let buffer = self.read_output_buffer(channel.local_port, &memory).await?;
            if !buffer.is_empty() {
                sender.send((self.domid, buffer)).await?;
                sender.send((self.domid, Some(buffer))).await?;
            }
        };
        channel.unmask_sender.send(channel.local_port).await?;
@@ -1,18 +0,0 @@
use anyhow::Result;
use tokio::fs::File;

pub struct XenConsole {
    pub read_handle: File,
    pub write_handle: File,
}

impl XenConsole {
    pub async fn new(tty: &str) -> Result<XenConsole> {
        let read_handle = File::options().read(true).write(false).open(tty).await?;
        let write_handle = File::options().read(false).write(true).open(tty).await?;
        Ok(XenConsole {
            read_handle,
            write_handle,
        })
    }
}
@@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::net::{IpAddr, Ipv6Addr};
use std::sync::Arc;
use std::{fs, net::Ipv4Addr, str::FromStr};

use advmac::MacAddr6;
@@ -7,48 +8,47 @@ use anyhow::{anyhow, Result};
use ipnetwork::{IpNetwork, Ipv4Network};
use krata::launchcfg::{
    LaunchInfo, LaunchNetwork, LaunchNetworkIpv4, LaunchNetworkIpv6, LaunchNetworkResolver,
    LaunchPackedFormat, LaunchRoot,
};
use krataoci::packer::OciImagePacked;
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::{DomainChannel, DomainConfig, DomainDisk, DomainNetworkInterface};
use xenstore::XsdInterface;

use crate::cfgblk::ConfigBlock;
use crate::RuntimeContext;
use krataoci::{
    cache::ImageCache,
    compiler::{ImageCompiler, ImageInfo},
    name::ImageName,
};

use super::{GuestInfo, GuestState};

pub struct GuestLaunchRequest<'a> {
pub struct GuestLaunchRequest {
    pub format: LaunchPackedFormat,
    pub uuid: Option<Uuid>,
    pub name: Option<&'a str>,
    pub image: &'a str,
    pub name: Option<String>,
    pub vcpus: u32,
    pub mem: u64,
    pub env: HashMap<String, String>,
    pub run: Option<Vec<String>>,
    pub debug: bool,
    pub image: OciImagePacked,
}

pub struct GuestLauncher {}
pub struct GuestLauncher {
    pub launch_semaphore: Arc<Semaphore>,
}

impl GuestLauncher {
    pub fn new() -> Result<Self> {
        Ok(Self {})
    pub fn new(launch_semaphore: Arc<Semaphore>) -> Result<Self> {
        Ok(Self { launch_semaphore })
    }

    pub async fn launch<'r>(
    pub async fn launch(
        &mut self,
        context: &mut RuntimeContext,
        request: GuestLaunchRequest<'r>,
        context: &RuntimeContext,
        request: GuestLaunchRequest,
    ) -> Result<GuestInfo> {
        let uuid = request.uuid.unwrap_or_else(Uuid::new_v4);
        let xen_name = format!("krata-{uuid}");
        let image_info = self.compile(request.image, &context.image_cache).await?;

        let mut gateway_mac = MacAddr6::random();
        gateway_mac.set_local(true);
        gateway_mac.set_multicast(false);
@@ -56,6 +56,7 @@ impl GuestLauncher {
        container_mac.set_local(true);
        container_mac.set_multicast(false);

        let _launch_permit = self.launch_semaphore.acquire().await?;
        let guest_ipv4 = self.allocate_ipv4(context).await?;
        let guest_ipv6 = container_mac.to_link_local_ipv6();
        let gateway_ipv4 = "10.75.70.1";
@@ -64,6 +65,16 @@ impl GuestLauncher {
        let ipv6_network_mask: u32 = 10;

        let launch_config = LaunchInfo {
            root: LaunchRoot {
                format: request.format.clone(),
            },
            hostname: Some(
                request
                    .name
                    .as_ref()
                    .map(|x| x.to_string())
                    .unwrap_or_else(|| format!("krata-{}", uuid)),
            ),
            network: Some(LaunchNetwork {
                link: "eth0".to_string(),
                ipv4: LaunchNetworkIpv4 {
@@ -87,13 +98,14 @@ impl GuestLauncher {
            run: request.run,
        };

        let cfgblk = ConfigBlock::new(&uuid, &image_info)?;
        let cfgblk = ConfigBlock::new(&uuid, &request.image)?;
        cfgblk.build(&launch_config)?;

        let image_squashfs_path = image_info
            .image_squashfs
        let image_squashfs_path = request
            .image
            .path
            .to_str()
            .ok_or_else(|| anyhow!("failed to convert image squashfs path to string"))?;
            .ok_or_else(|| anyhow!("failed to convert image path to string"))?;

        let cfgblk_dir_path = cfgblk
            .dir
@@ -129,7 +141,6 @@ impl GuestLauncher {
                    cfgblk_dir_path,
                ),
            ),
            ("krata/image".to_string(), request.image.to_string()),
            (
                "krata/network/guest/ipv4".to_string(),
                format!("{}/{}", guest_ipv4, ipv4_network_mask),
@@ -156,8 +167,8 @@ impl GuestLauncher {
            ),
        ];

        if let Some(name) = request.name {
            extra_keys.push(("krata/name".to_string(), name.to_string()));
        if let Some(name) = request.name.as_ref() {
            extra_keys.push(("krata/name".to_string(), name.clone()));
        }

        let config = DomainConfig {
@@ -168,6 +179,7 @@ impl GuestLauncher {
            kernel_path: &context.kernel,
            initrd_path: &context.initrd,
            cmdline: &cmdline,
            use_console_backend: Some("krata-console"),
            disks: vec![
                DomainDisk {
                    vdev: "xvda",
@@ -197,10 +209,10 @@ impl GuestLauncher {
        };
        match context.xen.create(&config).await {
            Ok(created) => Ok(GuestInfo {
                name: request.name.map(|x| x.to_string()),
                name: request.name.as_ref().map(|x| x.to_string()),
                uuid,
                domid: created.domid,
                image: request.image.to_string(),
                image: request.image.digest,
                loops: vec![],
                guest_ipv4: Some(IpNetwork::new(
                    IpAddr::V4(guest_ipv4),
@@ -217,27 +229,21 @@ impl GuestLauncher {
                )?),
                gateway_ipv6: Some(IpNetwork::new(
                    IpAddr::V6(Ipv6Addr::from_str(gateway_ipv6)?),
                    ipv4_network_mask as u8,
                    ipv6_network_mask as u8,
                )?),
                gateway_mac: Some(gateway_mac_string.clone()),
                state: GuestState { exit_code: None },
            }),
            Err(error) => {
                let _ = context.autoloop.unloop(&image_squashfs_loop.path);
                let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path);
                let _ = context.autoloop.unloop(&image_squashfs_loop.path).await;
                let _ = context.autoloop.unloop(&cfgblk_squashfs_loop.path).await;
                let _ = fs::remove_dir(&cfgblk.dir);
                Err(error.into())
            }
        }
    }

    async fn compile(&self, image: &str, image_cache: &ImageCache) -> Result<ImageInfo> {
        let image = ImageName::parse(image)?;
        let compiler = ImageCompiler::new(image_cache, None)?;
        compiler.compile(&image).await
    }

    async fn allocate_ipv4(&mut self, context: &mut RuntimeContext) -> Result<Ipv4Addr> {
    async fn allocate_ipv4(&self, context: &RuntimeContext) -> Result<Ipv4Addr> {
        let network = Ipv4Network::new(Ipv4Addr::new(10, 75, 80, 0), 24)?;
        let mut used: Vec<Ipv4Addr> = vec![];
        for domid_candidate in context.xen.store.list("/local/domain").await? {
@@ -264,7 +270,7 @@ impl GuestLauncher {

        if found.is_none() {
            return Err(anyhow!(
                "unable to find ipv4 to allocate to container, ipv4 addresses are exhausted"
                "unable to find ipv4 to allocate to guest, ipv4 addresses are exhausted"
            ));
        }
@@ -8,22 +8,19 @@ use std::{
use anyhow::{anyhow, Result};
use ipnetwork::IpNetwork;
use loopdev::LoopControl;
use tokio::sync::Mutex;
use tokio::sync::Semaphore;
use uuid::Uuid;
use xenclient::XenClient;
use xenstore::{XsdClient, XsdInterface};

use self::{
    autoloop::AutoLoop,
    console::XenConsole,
    launch::{GuestLaunchRequest, GuestLauncher},
};
use krataoci::cache::ImageCache;

pub mod autoloop;
pub mod cfgblk;
pub mod channel;
pub mod console;
pub mod launch;

pub struct GuestLoopInfo {
@@ -51,8 +48,8 @@ pub struct GuestInfo {
    pub state: GuestState,
}

#[derive(Clone)]
pub struct RuntimeContext {
    pub image_cache: ImageCache,
    pub autoloop: AutoLoop,
    pub xen: XenClient,
    pub kernel: String,
@@ -68,12 +65,10 @@ impl RuntimeContext {
        let xen = XenClient::open(0).await?;
        image_cache_path.push("image");
        fs::create_dir_all(&image_cache_path)?;
        let image_cache = ImageCache::new(&image_cache_path)?;
        let kernel = RuntimeContext::detect_guest_file(&store, "kernel")?;
        let initrd = RuntimeContext::detect_guest_file(&store, "initrd")?;

        Ok(RuntimeContext {
            image_cache,
            autoloop: AutoLoop::new(LoopControl::open()?),
            xen,
            kernel,
@@ -94,7 +89,7 @@ impl RuntimeContext {
        Err(anyhow!("unable to find required guest file: {}", name))
    }

    pub async fn list(&mut self) -> Result<Vec<GuestInfo>> {
    pub async fn list(&self) -> Result<Vec<GuestInfo>> {
        let mut guests: Vec<GuestInfo> = Vec::new();
        for domid_candidate in self.xen.store.list("/local/domain").await? {
            if domid_candidate == "0" {
@@ -218,7 +213,7 @@ impl RuntimeContext {
        Ok(guests)
    }

    pub async fn resolve(&mut self, uuid: Uuid) -> Result<Option<GuestInfo>> {
    pub async fn resolve(&self, uuid: Uuid) -> Result<Option<GuestInfo>> {
        for guest in self.list().await? {
            if guest.uuid == uuid {
                return Ok(Some(guest));
@@ -254,7 +249,8 @@ impl RuntimeContext {
#[derive(Clone)]
pub struct Runtime {
    store: Arc<String>,
    context: Arc<Mutex<RuntimeContext>>,
    context: RuntimeContext,
    launch_semaphore: Arc<Semaphore>,
}

impl Runtime {
@@ -262,24 +258,24 @@ impl Runtime {
        let context = RuntimeContext::new(store.clone()).await?;
        Ok(Self {
            store: Arc::new(store),
            context: Arc::new(Mutex::new(context)),
            context,
            launch_semaphore: Arc::new(Semaphore::new(1)),
        })
    }

    pub async fn launch<'a>(&self, request: GuestLaunchRequest<'a>) -> Result<GuestInfo> {
        let mut context = self.context.lock().await;
        let mut launcher = GuestLauncher::new()?;
        launcher.launch(&mut context, request).await
    pub async fn launch(&self, request: GuestLaunchRequest) -> Result<GuestInfo> {
        let mut launcher = GuestLauncher::new(self.launch_semaphore.clone())?;
        launcher.launch(&self.context, request).await
    }

    pub async fn destroy(&self, uuid: Uuid) -> Result<Uuid> {
        let mut context = self.context.lock().await;
        let info = context
        let info = self
            .context
            .resolve(uuid)
            .await?
            .ok_or_else(|| anyhow!("unable to resolve guest: {}", uuid))?;
        let domid = info.domid;
        let mut store = XsdClient::open().await?;
        let store = XsdClient::open().await?;
        let dom_path = store.get_domain_path(domid).await?;
        let uuid = match store
            .read_string(format!("{}/krata/uuid", dom_path).as_str())
@@ -301,9 +297,9 @@ impl Runtime {
            .read_string(format!("{}/krata/loops", dom_path).as_str())
            .await?;
        let loops = RuntimeContext::parse_loop_set(&loops);
        context.xen.destroy(domid).await?;
        self.context.xen.destroy(domid).await?;
        for info in &loops {
            context.autoloop.unloop(&info.device)?;
            self.context.autoloop.unloop(&info.device).await?;
            match &info.delete {
                None => {}
                Some(delete) => {
@@ -319,20 +315,8 @@ impl Runtime {
        Ok(uuid)
    }

    pub async fn console(&self, uuid: Uuid) -> Result<XenConsole> {
        let mut context = self.context.lock().await;
        let info = context
            .resolve(uuid)
            .await?
            .ok_or_else(|| anyhow!("unable to resolve guest: {}", uuid))?;
        let domid = info.domid;
        let tty = context.xen.get_console_path(domid).await?;
        XenConsole::new(&tty).await
    }

    pub async fn list(&self) -> Result<Vec<GuestInfo>> {
        let mut context = self.context.lock().await;
        context.list().await
        self.context.list().await
    }

    pub async fn dupe(&self) -> Result<Runtime> {
@@ -2,7 +2,7 @@
name = "krata-xencall"
description = "An implementation of direct interfacing to xen privcmd for krata."
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
@@ -13,6 +13,7 @@ libc = { workspace = true }
log = { workspace = true }
nix = { workspace = true, features = ["ioctl"] }
thiserror = { workspace = true }
tokio = { workspace = true }
uuid = { workspace = true }

[lib]
@@ -2,11 +2,12 @@ use xencall::error::Result;
use xencall::sys::CreateDomain;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
    env_logger::init();

    let call = XenCall::open(0)?;
    let domid = call.create_domain(CreateDomain::default())?;
    let domid = call.create_domain(CreateDomain::default()).await?;
    println!("created domain {}", domid);
    Ok(())
}
@@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
    env_logger::init();

    let call = XenCall::open(0)?;
    let info = call.get_domain_info(1)?;
    let info = call.get_domain_info(1).await?;
    println!("{:?}", info);
    Ok(())
}
@@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
    env_logger::init();

    let call = XenCall::open(0)?;
    let context = call.get_vcpu_context(224, 0)?;
    let context = call.get_vcpu_context(224, 0).await?;
    println!("{:?}", context);
    Ok(())
}
@@ -1,11 +1,12 @@
use xencall::error::Result;
use xencall::XenCall;

fn main() -> Result<()> {
#[tokio::main]
async fn main() -> Result<()> {
    env_logger::init();

    let call = XenCall::open(0)?;
    let info = call.get_version_capabilities()?;
    let info = call.get_version_capabilities().await?;
    println!("{:?}", info);
    Ok(())
}
@@ -8,8 +8,12 @@ pub enum Error {
    Kernel(#[from] nix::errno::Errno),
    #[error("io issue encountered: {0}")]
    Io(#[from] io::Error),
    #[error("failed to acquire semaphore: {0}")]
    AcquireSemaphoreFailed(#[from] tokio::sync::AcquireError),
    #[error("populate physmap failed")]
    PopulatePhysmapFailed,
    #[error("mmap batch failed: {0}")]
    MmapBatchFailed(nix::errno::Errno),
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -18,15 +18,19 @@ use libc::{c_int, mmap, usleep, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use log::trace;
use nix::errno::Errno;
use std::ffi::{c_long, c_uint, c_ulong, c_void};
use std::sync::Arc;
use sys::{XEN_DOMCTL_MAX_INTERFACE_VERSION, XEN_DOMCTL_MIN_INTERFACE_VERSION};
use tokio::sync::Semaphore;

use std::fs::{File, OpenOptions};
use std::os::fd::AsRawFd;
use std::ptr::addr_of_mut;
use std::slice;

#[derive(Clone)]
pub struct XenCall {
    pub handle: File,
    pub handle: Arc<File>,
    semaphore: Arc<Semaphore>,
    domctl_interface_version: u32,
}

@@ -39,7 +43,8 @@ impl XenCall {
        let domctl_interface_version =
            XenCall::detect_domctl_interface_version(&handle, current_domid)?;
        Ok(XenCall {
            handle,
            handle: Arc::new(handle),
            semaphore: Arc::new(Semaphore::new(1)),
            domctl_interface_version,
        })
    }
@@ -68,7 +73,8 @@ impl XenCall {
        Err(Error::XenVersionUnsupported)
    }

    pub fn mmap(&self, addr: u64, len: u64) -> Option<u64> {
    pub async fn mmap(&self, addr: u64, len: u64) -> Option<u64> {
        let _permit = self.semaphore.acquire().await.ok()?;
        trace!(
            "call fd={} mmap addr={:#x} len={}",
            self.handle.as_raw_fd(),
@@ -87,12 +93,20 @@ impl XenCall {
            if ptr == MAP_FAILED {
                None
            } else {
                trace!(
                    "call fd={} mmap addr={:#x} len={} = {:#x}",
                    self.handle.as_raw_fd(),
                    addr,
                    len,
                    ptr as u64,
                );
                Some(ptr as u64)
            }
        }
    }

    pub fn hypercall(&self, op: c_ulong, arg: [c_ulong; 5]) -> Result<c_long> {
    pub async fn hypercall(&self, op: c_ulong, arg: [c_ulong; 5]) -> Result<c_long> {
        let _permit = self.semaphore.acquire().await?;
        trace!(
            "call fd={} hypercall op={:#x} arg={:?}",
            self.handle.as_raw_fd(),
@@ -106,29 +120,29 @@ impl XenCall {
        }
    }

    pub fn hypercall0(&self, op: c_ulong) -> Result<c_long> {
        self.hypercall(op, [0, 0, 0, 0, 0])
    pub async fn hypercall0(&self, op: c_ulong) -> Result<c_long> {
        self.hypercall(op, [0, 0, 0, 0, 0]).await
    }

    pub fn hypercall1(&self, op: c_ulong, arg1: c_ulong) -> Result<c_long> {
        self.hypercall(op, [arg1, 0, 0, 0, 0])
    pub async fn hypercall1(&self, op: c_ulong, arg1: c_ulong) -> Result<c_long> {
        self.hypercall(op, [arg1, 0, 0, 0, 0]).await
    }

    pub fn hypercall2(&self, op: c_ulong, arg1: c_ulong, arg2: c_ulong) -> Result<c_long> {
        self.hypercall(op, [arg1, arg2, 0, 0, 0])
    pub async fn hypercall2(&self, op: c_ulong, arg1: c_ulong, arg2: c_ulong) -> Result<c_long> {
        self.hypercall(op, [arg1, arg2, 0, 0, 0]).await
    }

    pub fn hypercall3(
    pub async fn hypercall3(
        &self,
        op: c_ulong,
        arg1: c_ulong,
        arg2: c_ulong,
        arg3: c_ulong,
    ) -> Result<c_long> {
        self.hypercall(op, [arg1, arg2, arg3, 0, 0])
        self.hypercall(op, [arg1, arg2, arg3, 0, 0]).await
    }

    pub fn hypercall4(
    pub async fn hypercall4(
        &self,
        op: c_ulong,
        arg1: c_ulong,
@@ -136,10 +150,10 @@ impl XenCall {
        arg3: c_ulong,
        arg4: c_ulong,
    ) -> Result<c_long> {
        self.hypercall(op, [arg1, arg2, arg3, arg4, 0])
        self.hypercall(op, [arg1, arg2, arg3, arg4, 0]).await
    }

    pub fn hypercall5(
    pub async fn hypercall5(
        &self,
        op: c_ulong,
        arg1: c_ulong,
@@ -148,10 +162,10 @@ impl XenCall {
        arg4: c_ulong,
        arg5: c_ulong,
    ) -> Result<c_long> {
        self.hypercall(op, [arg1, arg2, arg3, arg4, arg5])
        self.hypercall(op, [arg1, arg2, arg3, arg4, arg5]).await
    }

    pub fn multicall(&self, calls: &mut [MultiCallEntry]) -> Result<()> {
    pub async fn multicall(&self, calls: &mut [MultiCallEntry]) -> Result<()> {
        trace!(
            "call fd={} multicall calls={:?}",
            self.handle.as_raw_fd(),
@@ -161,11 +175,12 @@ impl XenCall {
            HYPERVISOR_MULTICALL,
            calls.as_mut_ptr() as c_ulong,
            calls.len() as c_ulong,
        )?;
        )
        .await?;
        Ok(())
    }

    pub fn map_resource(
    pub async fn map_resource(
        &self,
        domid: u32,
        typ: u32,
@@ -174,6 +189,7 @@ impl XenCall {
        num: u64,
        addr: u64,
    ) -> Result<()> {
        let _permit = self.semaphore.acquire().await?;
        let mut resource = MmapResource {
            dom: domid as u16,
            typ,
@@ -188,7 +204,14 @@ impl XenCall {
        Ok(())
    }

    pub fn mmap_batch(&self, domid: u32, num: u64, addr: u64, mfns: Vec<u64>) -> Result<c_long> {
    pub async fn mmap_batch(
        &self,
        domid: u32,
        num: u64,
        addr: u64,
        mfns: Vec<u64>,
    ) -> Result<c_long> {
        let _permit = self.semaphore.acquire().await?;
        trace!(
            "call fd={} mmap_batch domid={} num={} addr={:#x} mfns={:?}",
            self.handle.as_raw_fd(),
@@ -211,7 +234,7 @@ impl XenCall {
            let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
            if let Err(errno) = result {
                if errno != Errno::ENOENT {
                    return Err(errno)?;
                    return Err(Error::MmapBatchFailed(errno))?;
                }

                usleep(100);
@@ -246,7 +269,7 @@ impl XenCall {
            let result = sys::mmapbatch(self.handle.as_raw_fd(), &mut batch);
            if let Err(n) = result {
                if n != Errno::ENOENT {
                    return Err(n)?;
                    return Err(Error::MmapBatchFailed(n))?;
                }
            }

@@ -266,7 +289,7 @@ impl XenCall {
        }
    }

    pub fn get_version_capabilities(&self) -> Result<XenCapabilitiesInfo> {
    pub async fn get_version_capabilities(&self) -> Result<XenCapabilitiesInfo> {
        trace!(
            "call fd={} get_version_capabilities",
            self.handle.as_raw_fd()
@@ -278,26 +301,29 @@ impl XenCall {
            HYPERVISOR_XEN_VERSION,
            XENVER_CAPABILITIES,
            addr_of_mut!(info) as c_ulong,
        )?;
        )
        .await?;
        Ok(info)
    }

    pub fn evtchn_op(&self, cmd: c_int, arg: u64) -> Result<()> {
        self.hypercall2(HYPERVISOR_EVENT_CHANNEL_OP, cmd as c_ulong, arg)?;
    pub async fn evtchn_op(&self, cmd: c_int, arg: u64) -> Result<()> {
        self.hypercall2(HYPERVISOR_EVENT_CHANNEL_OP, cmd as c_ulong, arg)
            .await?;
        Ok(())
    }

    pub fn evtchn_alloc_unbound(&self, domid: u32, remote_domid: u32) -> Result<u32> {
    pub async fn evtchn_alloc_unbound(&self, domid: u32, remote_domid: u32) -> Result<u32> {
        let mut alloc_unbound = EvtChnAllocUnbound {
            dom: domid as u16,
            remote_dom: remote_domid as u16,
            port: 0,
        };
        self.evtchn_op(6, addr_of_mut!(alloc_unbound) as c_ulong)?;
        self.evtchn_op(6, addr_of_mut!(alloc_unbound) as c_ulong)
            .await?;
        Ok(alloc_unbound.port)
    }

    pub fn get_domain_info(&self, domid: u32) -> Result<GetDomainInfo> {
    pub async fn get_domain_info(&self, domid: u32) -> Result<GetDomainInfo> {
        trace!(
            "domctl fd={} get_domain_info domid={}",
            self.handle.as_raw_fd(),
@@ -311,11 +337,12 @@ impl XenCall {
                get_domain_info: GetDomainInfo::default(),
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(unsafe { domctl.value.get_domain_info })
    }

    pub fn create_domain(&self, create_domain: CreateDomain) -> Result<u32> {
    pub async fn create_domain(&self, create_domain: CreateDomain) -> Result<u32> {
        trace!(
            "domctl fd={} create_domain create_domain={:?}",
            self.handle.as_raw_fd(),
@@ -327,11 +354,12 @@ impl XenCall {
            domid: 0,
            value: DomCtlValue { create_domain },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(domctl.domid)
    }

    pub fn pause_domain(&self, domid: u32) -> Result<()> {
    pub async fn pause_domain(&self, domid: u32) -> Result<()> {
        trace!(
            "domctl fd={} pause_domain domid={:?}",
            self.handle.as_raw_fd(),
@@ -343,11 +371,12 @@ impl XenCall {
            domid,
            value: DomCtlValue { pad: [0; 128] },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn unpause_domain(&self, domid: u32) -> Result<()> {
    pub async fn unpause_domain(&self, domid: u32) -> Result<()> {
        trace!(
            "domctl fd={} unpause_domain domid={:?}",
            self.handle.as_raw_fd(),
@@ -359,11 +388,12 @@ impl XenCall {
            domid,
            value: DomCtlValue { pad: [0; 128] },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn set_max_mem(&self, domid: u32, memkb: u64) -> Result<()> {
    pub async fn set_max_mem(&self, domid: u32, memkb: u64) -> Result<()> {
        trace!(
            "domctl fd={} set_max_mem domid={} memkb={}",
            self.handle.as_raw_fd(),
@@ -378,11 +408,12 @@ impl XenCall {
                max_mem: MaxMem { max_memkb: memkb },
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn set_max_vcpus(&self, domid: u32, max_vcpus: u32) -> Result<()> {
    pub async fn set_max_vcpus(&self, domid: u32, max_vcpus: u32) -> Result<()> {
        trace!(
            "domctl fd={} set_max_vcpus domid={} max_vcpus={}",
            self.handle.as_raw_fd(),
@@ -397,11 +428,12 @@ impl XenCall {
                max_cpus: MaxVcpus { max_vcpus },
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn set_address_size(&self, domid: u32, size: u32) -> Result<()> {
    pub async fn set_address_size(&self, domid: u32, size: u32) -> Result<()> {
        trace!(
            "domctl fd={} set_address_size domid={} size={}",
            self.handle.as_raw_fd(),
@@ -416,11 +448,12 @@ impl XenCall {
                address_size: AddressSize { size },
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
    pub async fn get_vcpu_context(&self, domid: u32, vcpu: u32) -> Result<VcpuGuestContext> {
        trace!(
            "domctl fd={} get_vcpu_context domid={}",
            self.handle.as_raw_fd(),
@@ -440,11 +473,12 @@ impl XenCall {
                },
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(unsafe { wrapper.value })
    }

    pub fn set_vcpu_context(
    pub async fn set_vcpu_context(
        &self,
        domid: u32,
        vcpu: u32,
@@ -469,11 +503,12 @@ impl XenCall {
                },
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn get_page_frame_info(&self, domid: u32, frames: &[u64]) -> Result<Vec<u64>> {
    pub async fn get_page_frame_info(&self, domid: u32, frames: &[u64]) -> Result<Vec<u64>> {
        let mut buffer: Vec<u64> = frames.to_vec();
        let mut domctl = DomCtl {
            cmd: XEN_DOMCTL_GETPAGEFRAMEINFO3,
@@ -486,7 +521,8 @@ impl XenCall {
                },
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        let slice = unsafe {
            slice::from_raw_parts_mut(
                domctl.value.get_page_frame_info.array as *mut u64,
@@ -496,7 +532,7 @@ impl XenCall {
        Ok(slice.to_vec())
    }

    pub fn hypercall_init(&self, domid: u32, gmfn: u64) -> Result<()> {
    pub async fn hypercall_init(&self, domid: u32, gmfn: u64) -> Result<()> {
        trace!(
            "domctl fd={} hypercall_init domid={} gmfn={}",
            self.handle.as_raw_fd(),
@@ -511,11 +547,12 @@ impl XenCall {
                hypercall_init: HypercallInit { gmfn },
            },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn destroy_domain(&self, domid: u32) -> Result<()> {
    pub async fn destroy_domain(&self, domid: u32) -> Result<()> {
        trace!(
            "domctl fd={} destroy_domain domid={}",
            self.handle.as_raw_fd(),
@@ -527,11 +564,12 @@ impl XenCall {
            domid,
            value: DomCtlValue { pad: [0; 128] },
        };
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)?;
        self.hypercall1(HYPERVISOR_DOMCTL, addr_of_mut!(domctl) as c_ulong)
            .await?;
        Ok(())
    }

    pub fn get_memory_map(&self, size_of_entry: usize) -> Result<Vec<u8>> {
    pub async fn get_memory_map(&self, size_of_entry: usize) -> Result<Vec<u8>> {
        let mut memory_map = MemoryMap {
            count: 0,
            buffer: 0,
@@ -540,18 +578,20 @@ impl XenCall {
            HYPERVISOR_MEMORY_OP,
            XEN_MEM_MEMORY_MAP as c_ulong,
            addr_of_mut!(memory_map) as c_ulong,
        )?;
        )
        .await?;
        let mut buffer = vec![0u8; memory_map.count as usize * size_of_entry];
        memory_map.buffer = buffer.as_mut_ptr() as c_ulong;
        self.hypercall2(
            HYPERVISOR_MEMORY_OP,
            XEN_MEM_MEMORY_MAP as c_ulong,
            addr_of_mut!(memory_map) as c_ulong,
        )?;
        )
        .await?;
        Ok(buffer)
    }

    pub fn populate_physmap(
    pub async fn populate_physmap(
        &self,
        domid: u32,
        nr_extents: u64,
@@ -583,7 +623,7 @@ impl XenCall {
                0,
            ],
        }];
        self.multicall(calls)?;
        self.multicall(calls).await?;
        let code = calls[0].result;
        if code > !0xfff {
            return Err(Error::PopulatePhysmapFailed);
@@ -595,7 +635,7 @@ impl XenCall {
        Ok(extents)
    }

    pub fn claim_pages(&self, domid: u32, pages: u64) -> Result<()> {
    pub async fn claim_pages(&self, domid: u32, pages: u64) -> Result<()> {
        trace!(
            "memory fd={} claim_pages domid={} pages={}",
            self.handle.as_raw_fd(),
@@ -613,11 +653,12 @@ impl XenCall {
            HYPERVISOR_MEMORY_OP,
            XEN_MEM_CLAIM_PAGES as c_ulong,
            addr_of_mut!(reservation) as c_ulong,
        )?;
        )
        .await?;
        Ok(())
    }

    pub fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
    pub async fn mmuext(&self, domid: u32, cmd: c_uint, arg1: u64, arg2: u64) -> Result<()> {
        let mut ops = MmuExtOp { cmd, arg1, arg2 };

        self.hypercall4(
@@ -627,6 +668,7 @@ impl XenCall {
            0,
            domid as c_ulong,
        )
        .await
        .map(|_| ())
    }
}
@@ -2,20 +2,22 @@
name = "krata-xenclient"
description = "An implementation of Xen userspace for krata."
license.workspace = true
version= "0.0.3"
version.workspace = true
homepage.workspace = true
repository.workspace = true
edition = "2021"
resolver = "2"

[dependencies]
async-trait = { workspace = true }
elf = { workspace = true }
flate2 = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
krata-xencall = { path = "../xencall", version = "^0.0.3" }
krata-xenstore = { path = "../xenstore", version = "^0.0.3" }
krata-xencall = { path = "../xencall", version = "^0.0.9" }
krata-xenstore = { path = "../xenstore", version = "^0.0.9" }
memchr = { workspace = true }
nix = { workspace = true }
slice-copy = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
@@ -24,6 +26,7 @@ xz2 = { workspace = true }

[dev-dependencies]
env_logger = { workspace = true }
tokio = { workspace = true }

[lib]
name = "xenclient"
@@ -13,7 +13,7 @@ async fn main() -> Result<()> {
    }
    let kernel_image_path = args.get(1).expect("argument not specified");
    let initrd_path = args.get(2).expect("argument not specified");
    let mut client = XenClient::open(0).await?;
    let client = XenClient::open(0).await?;
    let config = DomainConfig {
        backend_domid: 0,
        name: "xenclient-test",
@@ -22,6 +22,7 @@ async fn main() -> Result<()> {
        kernel_path: kernel_image_path.as_str(),
        initrd_path: initrd_path.as_str(),
        cmdline: "debug elevator=noop",
        use_console_backend: None,
        disks: vec![],
        channels: vec![],
        vifs: vec![],