mirror of
https://github.com/edera-dev/krata.git
synced 2025-08-03 05:10:55 +00:00
feature(krata): rename guest to zone (#266)
This commit is contained in:
4
.github/workflows/check.yml
vendored
4
.github/workflows/check.yml
vendored
@ -127,7 +127,7 @@ jobs:
|
|||||||
run: ./hack/ci/install-linux-deps.sh
|
run: ./hack/ci/install-linux-deps.sh
|
||||||
- name: cargo clippy
|
- name: cargo clippy
|
||||||
run: ./hack/build/cargo.sh clippy
|
run: ./hack/build/cargo.sh clippy
|
||||||
guest-init:
|
zone-initrd:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
@ -136,7 +136,7 @@ jobs:
|
|||||||
- aarch64
|
- aarch64
|
||||||
env:
|
env:
|
||||||
TARGET_ARCH: "${{ matrix.arch }}"
|
TARGET_ARCH: "${{ matrix.arch }}"
|
||||||
name: guest-init ${{ matrix.arch }}
|
name: zone initrd ${{ matrix.arch }}
|
||||||
steps:
|
steps:
|
||||||
- name: harden runner
|
- name: harden runner
|
||||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||||
|
2
.github/workflows/nightly.yml
vendored
2
.github/workflows/nightly.yml
vendored
@ -124,7 +124,7 @@ jobs:
|
|||||||
- kratactl
|
- kratactl
|
||||||
- kratad
|
- kratad
|
||||||
- kratanet
|
- kratanet
|
||||||
- krata-guest-init
|
- krata-zone
|
||||||
name: nightly oci build ${{ matrix.component }}
|
name: nightly oci build ${{ matrix.component }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
2
.github/workflows/release-assets.yml
vendored
2
.github/workflows/release-assets.yml
vendored
@ -121,7 +121,7 @@ jobs:
|
|||||||
- kratactl
|
- kratactl
|
||||||
- kratad
|
- kratad
|
||||||
- kratanet
|
- kratanet
|
||||||
- krata-guest-init
|
- krata-zone
|
||||||
name: release-assets oci ${{ matrix.component }}
|
name: release-assets oci ${{ matrix.component }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
50
Cargo.lock
generated
50
Cargo.lock
generated
@ -1393,31 +1393,6 @@ dependencies = [
|
|||||||
"uuid",
|
"uuid",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "krata-guest"
|
|
||||||
version = "0.0.12"
|
|
||||||
dependencies = [
|
|
||||||
"anyhow",
|
|
||||||
"cgroups-rs",
|
|
||||||
"env_logger",
|
|
||||||
"futures",
|
|
||||||
"ipnetwork",
|
|
||||||
"krata",
|
|
||||||
"krata-xenstore",
|
|
||||||
"libc",
|
|
||||||
"log",
|
|
||||||
"nix 0.29.0",
|
|
||||||
"oci-spec",
|
|
||||||
"path-absolutize",
|
|
||||||
"platform-info",
|
|
||||||
"rtnetlink",
|
|
||||||
"serde",
|
|
||||||
"serde_json",
|
|
||||||
"sys-mount",
|
|
||||||
"sysinfo",
|
|
||||||
"tokio",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "krata-loopdev"
|
name = "krata-loopdev"
|
||||||
version = "0.0.12"
|
version = "0.0.12"
|
||||||
@ -1603,6 +1578,31 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "krata-zone"
|
||||||
|
version = "0.0.12"
|
||||||
|
dependencies = [
|
||||||
|
"anyhow",
|
||||||
|
"cgroups-rs",
|
||||||
|
"env_logger",
|
||||||
|
"futures",
|
||||||
|
"ipnetwork",
|
||||||
|
"krata",
|
||||||
|
"krata-xenstore",
|
||||||
|
"libc",
|
||||||
|
"log",
|
||||||
|
"nix 0.29.0",
|
||||||
|
"oci-spec",
|
||||||
|
"path-absolutize",
|
||||||
|
"platform-info",
|
||||||
|
"rtnetlink",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
"sys-mount",
|
||||||
|
"sysinfo",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "lazy_static"
|
name = "lazy_static"
|
||||||
version = "1.4.0"
|
version = "1.4.0"
|
||||||
|
@ -3,7 +3,7 @@ members = [
|
|||||||
"crates/build",
|
"crates/build",
|
||||||
"crates/krata",
|
"crates/krata",
|
||||||
"crates/oci",
|
"crates/oci",
|
||||||
"crates/guest",
|
"crates/zone",
|
||||||
"crates/runtime",
|
"crates/runtime",
|
||||||
"crates/daemon",
|
"crates/daemon",
|
||||||
"crates/network",
|
"crates/network",
|
||||||
|
28
DEV.md
28
DEV.md
@ -9,7 +9,7 @@ krata is composed of four major executables:
|
|||||||
| kratad | host | backend daemon | ./hack/debug/kratad.sh | crates/daemon |
|
| kratad | host | backend daemon | ./hack/debug/kratad.sh | crates/daemon |
|
||||||
| kratanet | host | backend daemon | ./hack/debug/kratanet.sh | crates/network |
|
| kratanet | host | backend daemon | ./hack/debug/kratanet.sh | crates/network |
|
||||||
| kratactl | host | CLI tool | ./hack/debug/kratactl.sh | crates/ctl |
|
| kratactl | host | CLI tool | ./hack/debug/kratactl.sh | crates/ctl |
|
||||||
| krataguest | guest | none, guest init | N/A | crates/guest |
|
| kratazone | zone | none, zone init | N/A | crates/zone |
|
||||||
|
|
||||||
You will find the code to each executable available in the bin/ and src/ directories inside
|
You will find the code to each executable available in the bin/ and src/ directories inside
|
||||||
it's corresponding code path from the above table.
|
it's corresponding code path from the above table.
|
||||||
@ -19,7 +19,7 @@ it's corresponding code path from the above table.
|
|||||||
| Component | Specification | Notes |
|
| Component | Specification | Notes |
|
||||||
| ------------- | ------------- | --------------------------------------------------------------------------------- |
|
| ------------- | ------------- | --------------------------------------------------------------------------------- |
|
||||||
| Architecture | x86_64 | aarch64 support is still in development |
|
| Architecture | x86_64 | aarch64 support is still in development |
|
||||||
| Memory | At least 6GB | dom0 will need to be configured with lower memory limit to give krata guests room |
|
| Memory | At least 6GB | dom0 will need to be configured with lower memory limit to give krata zones room |
|
||||||
| Xen | 4.17 | Temporary due to hardcoded interface version constants |
|
| Xen | 4.17 | Temporary due to hardcoded interface version constants |
|
||||||
| Debian | stable / sid | Debian is recommended due to the ease of Xen setup |
|
| Debian | stable / sid | Debian is recommended due to the ease of Xen setup |
|
||||||
| rustup | any | Install Rustup from https://rustup.rs |
|
| rustup | any | Install Rustup from https://rustup.rs |
|
||||||
@ -45,10 +45,10 @@ $ rustup target add x86_64-unknown-linux-gnu
|
|||||||
$ rustup target add x86_64-unknown-linux-musl
|
$ rustup target add x86_64-unknown-linux-musl
|
||||||
```
|
```
|
||||||
|
|
||||||
4. Configure `/etc/default/grub.d/xen.cfg` to give krata guests some room:
|
4. Configure `/etc/default/grub.d/xen.cfg` to give krata zones some room:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
# Configure dom0_mem to be 4GB, but leave the rest of the RAM for krata guests.
|
# Configure dom0_mem to be 4GB, but leave the rest of the RAM for krata zones.
|
||||||
GRUB_CMDLINE_XEN_DEFAULT="dom0_mem=4G,max:4G"
|
GRUB_CMDLINE_XEN_DEFAULT="dom0_mem=4G,max:4G"
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -64,36 +64,36 @@ $ git clone https://github.com/edera-dev/krata.git krata
|
|||||||
$ cd krata
|
$ cd krata
|
||||||
```
|
```
|
||||||
|
|
||||||
6. Fetch the guest kernel image:
|
6. Fetch the zone kernel image:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ ./hack/kernel/fetch.sh -u
|
$ ./hack/kernel/fetch.sh -u
|
||||||
```
|
```
|
||||||
|
|
||||||
7. Copy the guest kernel artifacts to `/var/lib/krata/guest/kernel` so it is automatically detected by kratad:
|
7. Copy the zone kernel artifacts to `/var/lib/krata/zone/kernel` so it is automatically detected by kratad:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ mkdir -p /var/lib/krata/guest
|
$ mkdir -p /var/lib/krata/zone
|
||||||
$ cp target/kernel/kernel-x86_64 /var/lib/krata/guest/kernel
|
$ cp target/kernel/kernel-x86_64 /var/lib/krata/zone/kernel
|
||||||
$ cp target/kernel/addons-x86_64.squashfs /var/lib/krata/guest/addons.squashfs
|
$ cp target/kernel/addons-x86_64.squashfs /var/lib/krata/zone/addons.squashfs
|
||||||
```
|
```
|
||||||
|
|
||||||
8. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
|
8. Launch `./hack/debug/kratad.sh` and keep it running in the foreground.
|
||||||
9. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
|
9. Launch `./hack/debug/kratanet.sh` and keep it running in the foreground.
|
||||||
10. Run `kratactl` to launch a guest:
|
10. Run `kratactl` to launch a zone:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ ./hack/debug/kratactl.sh launch --attach alpine:latest
|
$ ./hack/debug/kratactl.sh launch --attach alpine:latest
|
||||||
```
|
```
|
||||||
|
|
||||||
To detach from the guest console, use `Ctrl + ]` on your keyboard.
|
To detach from the zone console, use `Ctrl + ]` on your keyboard.
|
||||||
|
|
||||||
To list the running guests, run:
|
To list the running zones, run:
|
||||||
```sh
|
```sh
|
||||||
$ ./hack/debug/kratactl.sh list
|
$ ./hack/debug/kratactl.sh list
|
||||||
```
|
```
|
||||||
|
|
||||||
To destroy a running guest, copy it's UUID from either the launch command or the guest list and run:
|
To destroy a running zone, copy it's UUID from either the launch command or the zone list and run:
|
||||||
```sh
|
```sh
|
||||||
$ ./hack/debug/kratactl.sh destroy GUEST_UUID
|
$ ./hack/debug/kratactl.sh destroy ZONE_UUID
|
||||||
```
|
```
|
||||||
|
@ -39,7 +39,7 @@ async fn main() -> Result<()> {
|
|||||||
);
|
);
|
||||||
|
|
||||||
let image = ImageName::parse(&args().nth(1).unwrap())?;
|
let image = ImageName::parse(&args().nth(1).unwrap())?;
|
||||||
let mut cache_dir = std::env::temp_dir().clone();
|
let mut cache_dir = env::temp_dir().clone();
|
||||||
cache_dir.push(format!("krata-cache-{}", Uuid::new_v4()));
|
cache_dir.push(format!("krata-cache-{}", Uuid::new_v4()));
|
||||||
fs::create_dir_all(&cache_dir).await?;
|
fs::create_dir_all(&cache_dir).await?;
|
||||||
|
|
||||||
|
@ -7,13 +7,13 @@ use tonic::transport::Channel;
|
|||||||
|
|
||||||
use crate::console::StdioConsoleStream;
|
use crate::console::StdioConsoleStream;
|
||||||
|
|
||||||
use super::resolve_guest;
|
use super::resolve_zone;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Attach to the guest console")]
|
#[command(about = "Attach to the zone console")]
|
||||||
pub struct AttachCommand {
|
pub struct AttachCommand {
|
||||||
#[arg(help = "Guest to attach to, either the name or the uuid")]
|
#[arg(help = "Zone to attach to, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AttachCommand {
|
impl AttachCommand {
|
||||||
@ -22,12 +22,12 @@ impl AttachCommand {
|
|||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
events: EventStream,
|
events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let input = StdioConsoleStream::stdin_stream(guest_id.clone()).await;
|
let input = StdioConsoleStream::stdin_stream(zone_id.clone()).await;
|
||||||
let output = client.console_data(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
||||||
let exit_hook_task = StdioConsoleStream::guest_exit_hook(guest_id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
|
||||||
let code = select! {
|
let code = select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
x??;
|
x??;
|
||||||
|
@ -3,10 +3,10 @@ use clap::Parser;
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::GuestStatus,
|
common::ZoneStatus,
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
DestroyGuestRequest,
|
DestroyZoneRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -14,19 +14,19 @@ use krata::{
|
|||||||
use log::error;
|
use log::error;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::cli::resolve_guest;
|
use crate::cli::resolve_zone;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Destroy a guest")]
|
#[command(about = "Destroy a zone")]
|
||||||
pub struct DestroyCommand {
|
pub struct DestroyCommand {
|
||||||
#[arg(
|
#[arg(
|
||||||
short = 'W',
|
short = 'W',
|
||||||
long,
|
long,
|
||||||
help = "Wait for the destruction of the guest to complete"
|
help = "Wait for the destruction of the zone to complete"
|
||||||
)]
|
)]
|
||||||
wait: bool,
|
wait: bool,
|
||||||
#[arg(help = "Guest to destroy, either the name or the uuid")]
|
#[arg(help = "Zone to destroy, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DestroyCommand {
|
impl DestroyCommand {
|
||||||
@ -35,46 +35,46 @@ impl DestroyCommand {
|
|||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
events: EventStream,
|
events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let _ = client
|
let _ = client
|
||||||
.destroy_guest(Request::new(DestroyGuestRequest {
|
.destroy_zone(Request::new(DestroyZoneRequest {
|
||||||
guest_id: guest_id.clone(),
|
zone_id: zone_id.clone(),
|
||||||
}))
|
}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
if self.wait {
|
if self.wait {
|
||||||
wait_guest_destroyed(&guest_id, events).await?;
|
wait_zone_destroyed(&zone_id, events).await?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_guest_destroyed(id: &str, events: EventStream) -> Result<()> {
|
async fn wait_zone_destroyed(id: &str, events: EventStream) -> Result<()> {
|
||||||
let mut stream = events.subscribe();
|
let mut stream = events.subscribe();
|
||||||
while let Ok(event) = stream.recv().await {
|
while let Ok(event) = stream.recv().await {
|
||||||
let Event::GuestChanged(changed) = event;
|
let Event::ZoneChanged(changed) = event;
|
||||||
let Some(guest) = changed.guest else {
|
let Some(zone) = changed.zone else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if guest.id != id {
|
if zone.id != id {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(state) = guest.state else {
|
let Some(state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ref error) = state.error_info {
|
if let Some(ref error) = state.error_info {
|
||||||
if state.status() == GuestStatus::Failed {
|
if state.status() == ZoneStatus::Failed {
|
||||||
error!("destroy failed: {}", error.message);
|
error!("destroy failed: {}", error.message);
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
} else {
|
} else {
|
||||||
error!("guest error: {}", error.message);
|
error!("zone error: {}", error.message);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == GuestStatus::Destroyed {
|
if state.status() == ZoneStatus::Destroyed {
|
||||||
std::process::exit(0);
|
std::process::exit(0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,42 +4,42 @@ use anyhow::Result;
|
|||||||
|
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use krata::v1::{
|
use krata::v1::{
|
||||||
common::{GuestTaskSpec, GuestTaskSpecEnvVar},
|
common::{ZoneTaskSpec, ZoneTaskSpecEnvVar},
|
||||||
control::{control_service_client::ControlServiceClient, ExecGuestRequest},
|
control::{control_service_client::ControlServiceClient, ExecZoneRequest},
|
||||||
};
|
};
|
||||||
|
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::console::StdioConsoleStream;
|
use crate::console::StdioConsoleStream;
|
||||||
|
|
||||||
use super::resolve_guest;
|
use super::resolve_zone;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Execute a command inside the guest")]
|
#[command(about = "Execute a command inside the zone")]
|
||||||
pub struct ExecCommand {
|
pub struct ExecCommand {
|
||||||
#[arg[short, long, help = "Environment variables"]]
|
#[arg[short, long, help = "Environment variables"]]
|
||||||
env: Option<Vec<String>>,
|
env: Option<Vec<String>>,
|
||||||
#[arg(short = 'w', long, help = "Working directory")]
|
#[arg(short = 'w', long, help = "Working directory")]
|
||||||
working_directory: Option<String>,
|
working_directory: Option<String>,
|
||||||
#[arg(help = "Guest to exec inside, either the name or the uuid")]
|
#[arg(help = "Zone to exec inside, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
#[arg(
|
#[arg(
|
||||||
allow_hyphen_values = true,
|
allow_hyphen_values = true,
|
||||||
trailing_var_arg = true,
|
trailing_var_arg = true,
|
||||||
help = "Command to run inside the guest"
|
help = "Command to run inside the zone"
|
||||||
)]
|
)]
|
||||||
command: Vec<String>,
|
command: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ExecCommand {
|
impl ExecCommand {
|
||||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let initial = ExecGuestRequest {
|
let initial = ExecZoneRequest {
|
||||||
guest_id,
|
zone_id,
|
||||||
task: Some(GuestTaskSpec {
|
task: Some(ZoneTaskSpec {
|
||||||
environment: env_map(&self.env.unwrap_or_default())
|
environment: env_map(&self.env.unwrap_or_default())
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(key, value)| GuestTaskSpecEnvVar {
|
.map(|(key, value)| ZoneTaskSpecEnvVar {
|
||||||
key: key.clone(),
|
key: key.clone(),
|
||||||
value: value.clone(),
|
value: value.clone(),
|
||||||
})
|
})
|
||||||
@ -52,7 +52,7 @@ impl ExecCommand {
|
|||||||
|
|
||||||
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
|
let stream = StdioConsoleStream::stdin_stream_exec(initial).await;
|
||||||
|
|
||||||
let response = client.exec_guest(Request::new(stream)).await?.into_inner();
|
let response = client.exec_zone(Request::new(stream)).await?.into_inner();
|
||||||
|
|
||||||
let code = StdioConsoleStream::exec_output(response).await?;
|
let code = StdioConsoleStream::exec_output(response).await?;
|
||||||
std::process::exit(code);
|
std::process::exit(code);
|
||||||
|
@ -6,12 +6,12 @@ use krata::{
|
|||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{
|
common::{
|
||||||
guest_image_spec::Image, GuestImageSpec, GuestOciImageSpec, GuestSpec, GuestSpecDevice,
|
zone_image_spec::Image, OciImageFormat, ZoneImageSpec, ZoneOciImageSpec, ZoneSpec,
|
||||||
GuestStatus, GuestTaskSpec, GuestTaskSpecEnvVar, OciImageFormat,
|
ZoneSpecDevice, ZoneStatus, ZoneTaskSpec, ZoneTaskSpecEnvVar,
|
||||||
},
|
},
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
CreateGuestRequest, PullImageRequest,
|
CreateZoneRequest, PullImageRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -28,56 +28,51 @@ pub enum LaunchImageFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Launch a new guest")]
|
#[command(about = "Launch a new zone")]
|
||||||
pub struct LaunchCommand {
|
pub struct LaunchCommand {
|
||||||
#[arg(long, default_value = "squashfs", help = "Image format")]
|
#[arg(long, default_value = "squashfs", help = "Image format")]
|
||||||
image_format: LaunchImageFormat,
|
image_format: LaunchImageFormat,
|
||||||
#[arg(long, help = "Overwrite image cache on pull")]
|
#[arg(long, help = "Overwrite image cache on pull")]
|
||||||
pull_overwrite_cache: bool,
|
pull_overwrite_cache: bool,
|
||||||
#[arg(short, long, help = "Name of the guest")]
|
#[arg(short, long, help = "Name of the zone")]
|
||||||
name: Option<String>,
|
name: Option<String>,
|
||||||
#[arg(
|
#[arg(short, long, default_value_t = 1, help = "vCPUs available to the zone")]
|
||||||
short,
|
|
||||||
long,
|
|
||||||
default_value_t = 1,
|
|
||||||
help = "vCPUs available to the guest"
|
|
||||||
)]
|
|
||||||
cpus: u32,
|
cpus: u32,
|
||||||
#[arg(
|
#[arg(
|
||||||
short,
|
short,
|
||||||
long,
|
long,
|
||||||
default_value_t = 512,
|
default_value_t = 512,
|
||||||
help = "Memory available to the guest, in megabytes"
|
help = "Memory available to the zone, in megabytes"
|
||||||
)]
|
)]
|
||||||
mem: u64,
|
mem: u64,
|
||||||
#[arg[short = 'D', long = "device", help = "Devices to request for the guest"]]
|
#[arg[short = 'D', long = "device", help = "Devices to request for the zone"]]
|
||||||
device: Vec<String>,
|
device: Vec<String>,
|
||||||
#[arg[short, long, help = "Environment variables set in the guest"]]
|
#[arg[short, long, help = "Environment variables set in the zone"]]
|
||||||
env: Option<Vec<String>>,
|
env: Option<Vec<String>>,
|
||||||
#[arg(
|
#[arg(
|
||||||
short,
|
short,
|
||||||
long,
|
long,
|
||||||
help = "Attach to the guest after guest starts, implies --wait"
|
help = "Attach to the zone after zone starts, implies --wait"
|
||||||
)]
|
)]
|
||||||
attach: bool,
|
attach: bool,
|
||||||
#[arg(
|
#[arg(
|
||||||
short = 'W',
|
short = 'W',
|
||||||
long,
|
long,
|
||||||
help = "Wait for the guest to start, implied by --attach"
|
help = "Wait for the zone to start, implied by --attach"
|
||||||
)]
|
)]
|
||||||
wait: bool,
|
wait: bool,
|
||||||
#[arg(short = 'k', long, help = "OCI kernel image for guest to use")]
|
#[arg(short = 'k', long, help = "OCI kernel image for zone to use")]
|
||||||
kernel: Option<String>,
|
kernel: Option<String>,
|
||||||
#[arg(short = 'I', long, help = "OCI initrd image for guest to use")]
|
#[arg(short = 'I', long, help = "OCI initrd image for zone to use")]
|
||||||
initrd: Option<String>,
|
initrd: Option<String>,
|
||||||
#[arg(short = 'w', long, help = "Working directory")]
|
#[arg(short = 'w', long, help = "Working directory")]
|
||||||
working_directory: Option<String>,
|
working_directory: Option<String>,
|
||||||
#[arg(help = "Container image for guest to use")]
|
#[arg(help = "Container image for zone to use")]
|
||||||
oci: String,
|
oci: String,
|
||||||
#[arg(
|
#[arg(
|
||||||
allow_hyphen_values = true,
|
allow_hyphen_values = true,
|
||||||
trailing_var_arg = true,
|
trailing_var_arg = true,
|
||||||
help = "Command to run inside the guest"
|
help = "Command to run inside the zone"
|
||||||
)]
|
)]
|
||||||
command: Vec<String>,
|
command: Vec<String>,
|
||||||
}
|
}
|
||||||
@ -117,18 +112,18 @@ impl LaunchCommand {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let request = CreateGuestRequest {
|
let request = CreateZoneRequest {
|
||||||
spec: Some(GuestSpec {
|
spec: Some(ZoneSpec {
|
||||||
name: self.name.unwrap_or_default(),
|
name: self.name.unwrap_or_default(),
|
||||||
image: Some(image),
|
image: Some(image),
|
||||||
kernel,
|
kernel,
|
||||||
initrd,
|
initrd,
|
||||||
vcpus: self.cpus,
|
vcpus: self.cpus,
|
||||||
mem: self.mem,
|
mem: self.mem,
|
||||||
task: Some(GuestTaskSpec {
|
task: Some(ZoneTaskSpec {
|
||||||
environment: env_map(&self.env.unwrap_or_default())
|
environment: env_map(&self.env.unwrap_or_default())
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(key, value)| GuestTaskSpecEnvVar {
|
.map(|(key, value)| ZoneTaskSpecEnvVar {
|
||||||
key: key.clone(),
|
key: key.clone(),
|
||||||
value: value.clone(),
|
value: value.clone(),
|
||||||
})
|
})
|
||||||
@ -140,26 +135,26 @@ impl LaunchCommand {
|
|||||||
devices: self
|
devices: self
|
||||||
.device
|
.device
|
||||||
.iter()
|
.iter()
|
||||||
.map(|name| GuestSpecDevice { name: name.clone() })
|
.map(|name| ZoneSpecDevice { name: name.clone() })
|
||||||
.collect(),
|
.collect(),
|
||||||
}),
|
}),
|
||||||
};
|
};
|
||||||
let response = client
|
let response = client
|
||||||
.create_guest(Request::new(request))
|
.create_zone(Request::new(request))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
let id = response.guest_id;
|
let id = response.zone_id;
|
||||||
|
|
||||||
if self.wait || self.attach {
|
if self.wait || self.attach {
|
||||||
wait_guest_started(&id, events.clone()).await?;
|
wait_zone_started(&id, events.clone()).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let code = if self.attach {
|
let code = if self.attach {
|
||||||
let input = StdioConsoleStream::stdin_stream(id.clone()).await;
|
let input = StdioConsoleStream::stdin_stream(id.clone()).await;
|
||||||
let output = client.console_data(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
||||||
let exit_hook_task = StdioConsoleStream::guest_exit_hook(id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(id.clone(), events).await?;
|
||||||
select! {
|
select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
x??;
|
x??;
|
||||||
@ -180,7 +175,7 @@ impl LaunchCommand {
|
|||||||
client: &mut ControlServiceClient<Channel>,
|
client: &mut ControlServiceClient<Channel>,
|
||||||
image: &str,
|
image: &str,
|
||||||
format: OciImageFormat,
|
format: OciImageFormat,
|
||||||
) -> Result<GuestImageSpec> {
|
) -> Result<ZoneImageSpec> {
|
||||||
let response = client
|
let response = client
|
||||||
.pull_image(PullImageRequest {
|
.pull_image(PullImageRequest {
|
||||||
image: image.to_string(),
|
image: image.to_string(),
|
||||||
@ -189,8 +184,8 @@ impl LaunchCommand {
|
|||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
let reply = pull_interactive_progress(response.into_inner()).await?;
|
let reply = pull_interactive_progress(response.into_inner()).await?;
|
||||||
Ok(GuestImageSpec {
|
Ok(ZoneImageSpec {
|
||||||
image: Some(Image::Oci(GuestOciImageSpec {
|
image: Some(Image::Oci(ZoneOciImageSpec {
|
||||||
digest: reply.digest,
|
digest: reply.digest,
|
||||||
format: reply.format,
|
format: reply.format,
|
||||||
})),
|
})),
|
||||||
@ -198,38 +193,38 @@ impl LaunchCommand {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn wait_guest_started(id: &str, events: EventStream) -> Result<()> {
|
async fn wait_zone_started(id: &str, events: EventStream) -> Result<()> {
|
||||||
let mut stream = events.subscribe();
|
let mut stream = events.subscribe();
|
||||||
while let Ok(event) = stream.recv().await {
|
while let Ok(event) = stream.recv().await {
|
||||||
match event {
|
match event {
|
||||||
Event::GuestChanged(changed) => {
|
Event::ZoneChanged(changed) => {
|
||||||
let Some(guest) = changed.guest else {
|
let Some(zone) = changed.zone else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if guest.id != id {
|
if zone.id != id {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some(state) = guest.state else {
|
let Some(state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(ref error) = state.error_info {
|
if let Some(ref error) = state.error_info {
|
||||||
if state.status() == GuestStatus::Failed {
|
if state.status() == ZoneStatus::Failed {
|
||||||
error!("launch failed: {}", error.message);
|
error!("launch failed: {}", error.message);
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
} else {
|
} else {
|
||||||
error!("guest error: {}", error.message);
|
error!("zone error: {}", error.message);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == GuestStatus::Destroyed {
|
if state.status() == ZoneStatus::Destroyed {
|
||||||
error!("guest destroyed");
|
error!("zone destroyed");
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if state.status() == GuestStatus::Started {
|
if state.status() == ZoneStatus::Started {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,9 +4,9 @@ use comfy_table::{presets::UTF8_FULL_CONDENSED, Cell, Color, Table};
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{Guest, GuestStatus},
|
common::{Zone, ZoneStatus},
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, ListGuestsRequest, ResolveGuestRequest,
|
control_service_client::ControlServiceClient, ListZonesRequest, ResolveZoneRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -14,7 +14,7 @@ use krata::{
|
|||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
use crate::format::{guest_simple_line, guest_status_text, kv2line, proto2dynamic, proto2kv};
|
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line, zone_status_text};
|
||||||
|
|
||||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
enum ListFormat {
|
enum ListFormat {
|
||||||
@ -28,12 +28,12 @@ enum ListFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "List the guests on the isolation engine")]
|
#[command(about = "List the zones on the isolation engine")]
|
||||||
pub struct ListCommand {
|
pub struct ListCommand {
|
||||||
#[arg(short, long, default_value = "table", help = "Output format")]
|
#[arg(short, long, default_value = "table", help = "Output format")]
|
||||||
format: ListFormat,
|
format: ListFormat,
|
||||||
#[arg(help = "Limit to a single guest, either the name or the uuid")]
|
#[arg(help = "Limit to a single zone, either the name or the uuid")]
|
||||||
guest: Option<String>,
|
zone: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ListCommand {
|
impl ListCommand {
|
||||||
@ -42,27 +42,25 @@ impl ListCommand {
|
|||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
_events: EventStream,
|
_events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut guests = if let Some(ref guest) = self.guest {
|
let mut zones = if let Some(ref zone) = self.zone {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_guest(Request::new(ResolveGuestRequest {
|
.resolve_zone(Request::new(ResolveZoneRequest { name: zone.clone() }))
|
||||||
name: guest.clone(),
|
|
||||||
}))
|
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
if let Some(guest) = reply.guest {
|
if let Some(zone) = reply.zone {
|
||||||
vec![guest]
|
vec![zone]
|
||||||
} else {
|
} else {
|
||||||
return Err(anyhow!("unable to resolve guest '{}'", guest));
|
return Err(anyhow!("unable to resolve zone '{}'", zone));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
client
|
client
|
||||||
.list_guests(Request::new(ListGuestsRequest {}))
|
.list_zones(Request::new(ListZonesRequest {}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner()
|
.into_inner()
|
||||||
.guests
|
.zones
|
||||||
};
|
};
|
||||||
|
|
||||||
guests.sort_by(|a, b| {
|
zones.sort_by(|a, b| {
|
||||||
a.spec
|
a.spec
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|x| x.name.as_str())
|
.map(|x| x.name.as_str())
|
||||||
@ -72,19 +70,19 @@ impl ListCommand {
|
|||||||
|
|
||||||
match self.format {
|
match self.format {
|
||||||
ListFormat::Table => {
|
ListFormat::Table => {
|
||||||
self.print_guest_table(guests)?;
|
self.print_zone_table(zones)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
ListFormat::Simple => {
|
ListFormat::Simple => {
|
||||||
for guest in guests {
|
for zone in zones {
|
||||||
println!("{}", guest_simple_line(&guest));
|
println!("{}", zone_simple_line(&zone));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ListFormat::Json | ListFormat::JsonPretty | ListFormat::Yaml => {
|
ListFormat::Json | ListFormat::JsonPretty | ListFormat::Yaml => {
|
||||||
let mut values = Vec::new();
|
let mut values = Vec::new();
|
||||||
for guest in guests {
|
for zone in zones {
|
||||||
let message = proto2dynamic(guest)?;
|
let message = proto2dynamic(zone)?;
|
||||||
values.push(serde_json::to_value(message)?);
|
values.push(serde_json::to_value(message)?);
|
||||||
}
|
}
|
||||||
let value = Value::Array(values);
|
let value = Value::Array(values);
|
||||||
@ -99,64 +97,62 @@ impl ListCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ListFormat::Jsonl => {
|
ListFormat::Jsonl => {
|
||||||
for guest in guests {
|
for zone in zones {
|
||||||
let message = proto2dynamic(guest)?;
|
let message = proto2dynamic(zone)?;
|
||||||
println!("{}", serde_json::to_string(&message)?);
|
println!("{}", serde_json::to_string(&message)?);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ListFormat::KeyValue => {
|
ListFormat::KeyValue => {
|
||||||
self.print_key_value(guests)?;
|
self.print_key_value(zones)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn print_guest_table(&self, guests: Vec<Guest>) -> Result<()> {
|
fn print_zone_table(&self, zones: Vec<Zone>) -> Result<()> {
|
||||||
let mut table = Table::new();
|
let mut table = Table::new();
|
||||||
table.load_preset(UTF8_FULL_CONDENSED);
|
table.load_preset(UTF8_FULL_CONDENSED);
|
||||||
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
table.set_content_arrangement(comfy_table::ContentArrangement::Dynamic);
|
||||||
table.set_header(vec!["name", "uuid", "status", "ipv4", "ipv6"]);
|
table.set_header(vec!["name", "uuid", "status", "ipv4", "ipv6"]);
|
||||||
for guest in guests {
|
for zone in zones {
|
||||||
let ipv4 = guest
|
let ipv4 = zone
|
||||||
.state
|
.state
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|x| x.network.as_ref())
|
.and_then(|x| x.network.as_ref())
|
||||||
.map(|x| x.guest_ipv4.as_str())
|
.map(|x| x.zone_ipv4.as_str())
|
||||||
.unwrap_or("n/a");
|
.unwrap_or("n/a");
|
||||||
let ipv6 = guest
|
let ipv6 = zone
|
||||||
.state
|
.state
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|x| x.network.as_ref())
|
.and_then(|x| x.network.as_ref())
|
||||||
.map(|x| x.guest_ipv6.as_str())
|
.map(|x| x.zone_ipv6.as_str())
|
||||||
.unwrap_or("n/a");
|
.unwrap_or("n/a");
|
||||||
let Some(spec) = guest.spec else {
|
let Some(spec) = zone.spec else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
let status = guest.state.as_ref().cloned().unwrap_or_default().status();
|
let status = zone.state.as_ref().cloned().unwrap_or_default().status();
|
||||||
let status_text = guest_status_text(status);
|
let status_text = zone_status_text(status);
|
||||||
|
|
||||||
let status_color = match status {
|
let status_color = match status {
|
||||||
GuestStatus::Destroyed | GuestStatus::Failed => Color::Red,
|
ZoneStatus::Destroyed | ZoneStatus::Failed => Color::Red,
|
||||||
GuestStatus::Destroying | GuestStatus::Exited | GuestStatus::Starting => {
|
ZoneStatus::Destroying | ZoneStatus::Exited | ZoneStatus::Starting => Color::Yellow,
|
||||||
Color::Yellow
|
ZoneStatus::Started => Color::Green,
|
||||||
}
|
|
||||||
GuestStatus::Started => Color::Green,
|
|
||||||
_ => Color::Reset,
|
_ => Color::Reset,
|
||||||
};
|
};
|
||||||
|
|
||||||
table.add_row(vec![
|
table.add_row(vec![
|
||||||
Cell::new(spec.name),
|
Cell::new(spec.name),
|
||||||
Cell::new(guest.id),
|
Cell::new(zone.id),
|
||||||
Cell::new(status_text).fg(status_color),
|
Cell::new(status_text).fg(status_color),
|
||||||
Cell::new(ipv4.to_string()),
|
Cell::new(ipv4.to_string()),
|
||||||
Cell::new(ipv6.to_string()),
|
Cell::new(ipv6.to_string()),
|
||||||
]);
|
]);
|
||||||
}
|
}
|
||||||
if table.is_empty() {
|
if table.is_empty() {
|
||||||
if self.guest.is_none() {
|
if self.zone.is_none() {
|
||||||
println!("no guests have been launched");
|
println!("no zones have been launched");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
println!("{}", table);
|
println!("{}", table);
|
||||||
@ -164,9 +160,9 @@ impl ListCommand {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn print_key_value(&self, guests: Vec<Guest>) -> Result<()> {
|
fn print_key_value(&self, zones: Vec<Zone>) -> Result<()> {
|
||||||
for guest in guests {
|
for zone in zones {
|
||||||
let kvs = proto2kv(guest)?;
|
let kvs = proto2kv(zone)?;
|
||||||
println!("{}", kv2line(kvs),);
|
println!("{}", kv2line(kvs),);
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -3,7 +3,7 @@ use async_stream::stream;
|
|||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::control::{control_service_client::ControlServiceClient, ConsoleDataRequest},
|
v1::control::{control_service_client::ControlServiceClient, ZoneConsoleRequest},
|
||||||
};
|
};
|
||||||
|
|
||||||
use tokio::select;
|
use tokio::select;
|
||||||
@ -12,15 +12,15 @@ use tonic::transport::Channel;
|
|||||||
|
|
||||||
use crate::console::StdioConsoleStream;
|
use crate::console::StdioConsoleStream;
|
||||||
|
|
||||||
use super::resolve_guest;
|
use super::resolve_zone;
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "View the logs of a guest")]
|
#[command(about = "View the logs of a zone")]
|
||||||
pub struct LogsCommand {
|
pub struct LogsCommand {
|
||||||
#[arg(short, long, help = "Follow output from the guest")]
|
#[arg(short, long, help = "Follow output from the zone")]
|
||||||
follow: bool,
|
follow: bool,
|
||||||
#[arg(help = "Guest to show logs for, either the name or the uuid")]
|
#[arg(help = "Zone to show logs for, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LogsCommand {
|
impl LogsCommand {
|
||||||
@ -29,22 +29,22 @@ impl LogsCommand {
|
|||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
events: EventStream,
|
events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let guest_id_stream = guest_id.clone();
|
let zone_id_stream = zone_id.clone();
|
||||||
let follow = self.follow;
|
let follow = self.follow;
|
||||||
let input = stream! {
|
let input = stream! {
|
||||||
yield ConsoleDataRequest { guest_id: guest_id_stream, data: Vec::new() };
|
yield ZoneConsoleRequest { zone_id: zone_id_stream, data: Vec::new() };
|
||||||
if follow {
|
if follow {
|
||||||
let mut pending = pending::<ConsoleDataRequest>();
|
let mut pending = pending::<ZoneConsoleRequest>();
|
||||||
while let Some(x) = pending.next().await {
|
while let Some(x) = pending.next().await {
|
||||||
yield x;
|
yield x;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let output = client.console_data(input).await?.into_inner();
|
let output = client.attach_zone_console(input).await?.into_inner();
|
||||||
let stdout_handle =
|
let stdout_handle =
|
||||||
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
tokio::task::spawn(async move { StdioConsoleStream::stdout(output).await });
|
||||||
let exit_hook_task = StdioConsoleStream::guest_exit_hook(guest_id.clone(), events).await?;
|
let exit_hook_task = StdioConsoleStream::zone_exit_hook(zone_id.clone(), events).await?;
|
||||||
let code = select! {
|
let code = select! {
|
||||||
x = stdout_handle => {
|
x = stdout_handle => {
|
||||||
x??;
|
x??;
|
||||||
|
@ -3,8 +3,8 @@ use clap::{Parser, ValueEnum};
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::GuestMetricNode,
|
common::ZoneMetricNode,
|
||||||
control::{control_service_client::ControlServiceClient, ReadGuestMetricsRequest},
|
control::{control_service_client::ControlServiceClient, ReadZoneMetricsRequest},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -12,7 +12,7 @@ use tonic::transport::Channel;
|
|||||||
|
|
||||||
use crate::format::{kv2line, metrics_flat, metrics_tree, proto2dynamic};
|
use crate::format::{kv2line, metrics_flat, metrics_tree, proto2dynamic};
|
||||||
|
|
||||||
use super::resolve_guest;
|
use super::resolve_zone;
|
||||||
|
|
||||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
enum MetricsFormat {
|
enum MetricsFormat {
|
||||||
@ -24,12 +24,12 @@ enum MetricsFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Read metrics from the guest")]
|
#[command(about = "Read metrics from the zone")]
|
||||||
pub struct MetricsCommand {
|
pub struct MetricsCommand {
|
||||||
#[arg(short, long, default_value = "tree", help = "Output format")]
|
#[arg(short, long, default_value = "tree", help = "Output format")]
|
||||||
format: MetricsFormat,
|
format: MetricsFormat,
|
||||||
#[arg(help = "Guest to read metrics for, either the name or the uuid")]
|
#[arg(help = "Zone to read metrics for, either the name or the uuid")]
|
||||||
guest: String,
|
zone: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MetricsCommand {
|
impl MetricsCommand {
|
||||||
@ -38,9 +38,9 @@ impl MetricsCommand {
|
|||||||
mut client: ControlServiceClient<Channel>,
|
mut client: ControlServiceClient<Channel>,
|
||||||
_events: EventStream,
|
_events: EventStream,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let guest_id: String = resolve_guest(&mut client, &self.guest).await?;
|
let zone_id: String = resolve_zone(&mut client, &self.zone).await?;
|
||||||
let root = client
|
let root = client
|
||||||
.read_guest_metrics(ReadGuestMetricsRequest { guest_id })
|
.read_zone_metrics(ReadZoneMetricsRequest { zone_id })
|
||||||
.await?
|
.await?
|
||||||
.into_inner()
|
.into_inner()
|
||||||
.root
|
.root
|
||||||
@ -70,12 +70,12 @@ impl MetricsCommand {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn print_metrics_tree(&self, root: GuestMetricNode) -> Result<()> {
|
fn print_metrics_tree(&self, root: ZoneMetricNode) -> Result<()> {
|
||||||
print!("{}", metrics_tree(root));
|
print!("{}", metrics_tree(root));
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn print_key_value(&self, metrics: GuestMetricNode) -> Result<()> {
|
fn print_key_value(&self, metrics: ZoneMetricNode) -> Result<()> {
|
||||||
let kvs = metrics_flat(metrics);
|
let kvs = metrics_flat(metrics);
|
||||||
println!("{}", kv2line(kvs));
|
println!("{}", kv2line(kvs));
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -19,7 +19,7 @@ use clap::{Parser, Subcommand};
|
|||||||
use krata::{
|
use krata::{
|
||||||
client::ControlClientProvider,
|
client::ControlClientProvider,
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::control::{control_service_client::ControlServiceClient, ResolveGuestRequest},
|
v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest},
|
||||||
};
|
};
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
@ -135,20 +135,20 @@ impl ControlCommand {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn resolve_guest(
|
pub async fn resolve_zone(
|
||||||
client: &mut ControlServiceClient<Channel>,
|
client: &mut ControlServiceClient<Channel>,
|
||||||
name: &str,
|
name: &str,
|
||||||
) -> Result<String> {
|
) -> Result<String> {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_guest(Request::new(ResolveGuestRequest {
|
.resolve_zone(Request::new(ResolveZoneRequest {
|
||||||
name: name.to_string(),
|
name: name.to_string(),
|
||||||
}))
|
}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
|
|
||||||
if let Some(guest) = reply.guest {
|
if let Some(zone) = reply.zone {
|
||||||
Ok(guest.id)
|
Ok(zone.id)
|
||||||
} else {
|
} else {
|
||||||
Err(anyhow!("unable to resolve guest '{}'", name))
|
Err(anyhow!("unable to resolve zone '{}'", name))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,26 +1,26 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use krata::v1::control::{control_service_client::ControlServiceClient, ResolveGuestRequest};
|
use krata::v1::control::{control_service_client::ControlServiceClient, ResolveZoneRequest};
|
||||||
|
|
||||||
use tonic::{transport::Channel, Request};
|
use tonic::{transport::Channel, Request};
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Resolve a guest name to a uuid")]
|
#[command(about = "Resolve a zone name to a uuid")]
|
||||||
pub struct ResolveCommand {
|
pub struct ResolveCommand {
|
||||||
#[arg(help = "Guest name")]
|
#[arg(help = "Zone name")]
|
||||||
guest: String,
|
zone: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ResolveCommand {
|
impl ResolveCommand {
|
||||||
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
pub async fn run(self, mut client: ControlServiceClient<Channel>) -> Result<()> {
|
||||||
let reply = client
|
let reply = client
|
||||||
.resolve_guest(Request::new(ResolveGuestRequest {
|
.resolve_zone(Request::new(ResolveZoneRequest {
|
||||||
name: self.guest.clone(),
|
name: self.zone.clone(),
|
||||||
}))
|
}))
|
||||||
.await?
|
.await?
|
||||||
.into_inner();
|
.into_inner();
|
||||||
if let Some(guest) = reply.guest {
|
if let Some(zone) = reply.zone {
|
||||||
println!("{}", guest.id);
|
println!("{}", zone.id);
|
||||||
} else {
|
} else {
|
||||||
std::process::exit(1);
|
std::process::exit(1);
|
||||||
}
|
}
|
||||||
|
@ -24,14 +24,14 @@ use ratatui::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
format::guest_status_text,
|
format::zone_status_text,
|
||||||
metrics::{
|
metrics::{
|
||||||
lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
|
lookup_metric_value, MultiMetricCollector, MultiMetricCollectorHandle, MultiMetricState,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Dashboard for running guests")]
|
#[command(about = "Dashboard for running zones")]
|
||||||
pub struct TopCommand {}
|
pub struct TopCommand {}
|
||||||
|
|
||||||
pub type Tui = Terminal<CrosstermBackend<Stdout>>;
|
pub type Tui = Terminal<CrosstermBackend<Stdout>>;
|
||||||
@ -46,7 +46,7 @@ impl TopCommand {
|
|||||||
let collector = collector.launch().await?;
|
let collector = collector.launch().await?;
|
||||||
let mut tui = TopCommand::init()?;
|
let mut tui = TopCommand::init()?;
|
||||||
let mut app = TopApp {
|
let mut app = TopApp {
|
||||||
metrics: MultiMetricState { guests: vec![] },
|
metrics: MultiMetricState { zones: vec![] },
|
||||||
exit: false,
|
exit: false,
|
||||||
table: TableState::new(),
|
table: TableState::new(),
|
||||||
};
|
};
|
||||||
@ -152,12 +152,12 @@ impl Widget for &mut TopApp {
|
|||||||
|
|
||||||
let mut rows = vec![];
|
let mut rows = vec![];
|
||||||
|
|
||||||
for ms in &self.metrics.guests {
|
for ms in &self.metrics.zones {
|
||||||
let Some(ref spec) = ms.guest.spec else {
|
let Some(ref spec) = ms.zone.spec else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(ref state) = ms.guest.state else {
|
let Some(ref state) = ms.zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -176,8 +176,8 @@ impl Widget for &mut TopApp {
|
|||||||
|
|
||||||
let row = Row::new(vec![
|
let row = Row::new(vec![
|
||||||
spec.name.clone(),
|
spec.name.clone(),
|
||||||
ms.guest.id.clone(),
|
ms.zone.id.clone(),
|
||||||
guest_status_text(state.status()),
|
zone_status_text(state.status()),
|
||||||
memory_total.unwrap_or_default(),
|
memory_total.unwrap_or_default(),
|
||||||
memory_used.unwrap_or_default(),
|
memory_used.unwrap_or_default(),
|
||||||
memory_free.unwrap_or_default(),
|
memory_free.unwrap_or_default(),
|
||||||
|
@ -2,12 +2,12 @@ use anyhow::Result;
|
|||||||
use clap::{Parser, ValueEnum};
|
use clap::{Parser, ValueEnum};
|
||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{common::Guest, control::watch_events_reply::Event},
|
v1::{common::Zone, control::watch_events_reply::Event},
|
||||||
};
|
};
|
||||||
use prost_reflect::ReflectMessage;
|
use prost_reflect::ReflectMessage;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::format::{guest_simple_line, kv2line, proto2dynamic, proto2kv};
|
use crate::format::{kv2line, proto2dynamic, proto2kv, zone_simple_line};
|
||||||
|
|
||||||
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
#[derive(ValueEnum, Clone, Debug, PartialEq, Eq)]
|
||||||
enum WatchFormat {
|
enum WatchFormat {
|
||||||
@ -17,7 +17,7 @@ enum WatchFormat {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(about = "Watch for guest changes")]
|
#[command(about = "Watch for zone changes")]
|
||||||
pub struct WatchCommand {
|
pub struct WatchCommand {
|
||||||
#[arg(short, long, default_value = "simple", help = "Output format")]
|
#[arg(short, long, default_value = "simple", help = "Output format")]
|
||||||
format: WatchFormat,
|
format: WatchFormat,
|
||||||
@ -29,22 +29,17 @@ impl WatchCommand {
|
|||||||
loop {
|
loop {
|
||||||
let event = stream.recv().await?;
|
let event = stream.recv().await?;
|
||||||
|
|
||||||
let Event::GuestChanged(changed) = event;
|
let Event::ZoneChanged(changed) = event;
|
||||||
let guest = changed.guest.clone();
|
let zone = changed.zone.clone();
|
||||||
self.print_event("guest.changed", changed, guest)?;
|
self.print_event("zone.changed", changed, zone)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn print_event(
|
fn print_event(&self, typ: &str, event: impl ReflectMessage, zone: Option<Zone>) -> Result<()> {
|
||||||
&self,
|
|
||||||
typ: &str,
|
|
||||||
event: impl ReflectMessage,
|
|
||||||
guest: Option<Guest>,
|
|
||||||
) -> Result<()> {
|
|
||||||
match self.format {
|
match self.format {
|
||||||
WatchFormat::Simple => {
|
WatchFormat::Simple => {
|
||||||
if let Some(guest) = guest {
|
if let Some(zone) = zone {
|
||||||
println!("{}", guest_simple_line(&guest));
|
println!("{}", zone_simple_line(&zone));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -7,10 +7,10 @@ use crossterm::{
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::GuestStatus,
|
common::ZoneStatus,
|
||||||
control::{
|
control::{
|
||||||
watch_events_reply::Event, ConsoleDataReply, ConsoleDataRequest, ExecGuestReply,
|
watch_events_reply::Event, ExecZoneReply, ExecZoneRequest, ZoneConsoleReply,
|
||||||
ExecGuestRequest,
|
ZoneConsoleRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -25,10 +25,10 @@ use tonic::Streaming;
|
|||||||
pub struct StdioConsoleStream;
|
pub struct StdioConsoleStream;
|
||||||
|
|
||||||
impl StdioConsoleStream {
|
impl StdioConsoleStream {
|
||||||
pub async fn stdin_stream(guest: String) -> impl Stream<Item = ConsoleDataRequest> {
|
pub async fn stdin_stream(zone: String) -> impl Stream<Item = ZoneConsoleRequest> {
|
||||||
let mut stdin = stdin();
|
let mut stdin = stdin();
|
||||||
stream! {
|
stream! {
|
||||||
yield ConsoleDataRequest { guest_id: guest, data: vec![] };
|
yield ZoneConsoleRequest { zone_id: zone, data: vec![] };
|
||||||
|
|
||||||
let mut buffer = vec![0u8; 60];
|
let mut buffer = vec![0u8; 60];
|
||||||
loop {
|
loop {
|
||||||
@ -43,14 +43,14 @@ impl StdioConsoleStream {
|
|||||||
if size == 1 && buffer[0] == 0x1d {
|
if size == 1 && buffer[0] == 0x1d {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
yield ConsoleDataRequest { guest_id: String::default(), data };
|
yield ZoneConsoleRequest { zone_id: String::default(), data };
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn stdin_stream_exec(
|
pub async fn stdin_stream_exec(
|
||||||
initial: ExecGuestRequest,
|
initial: ExecZoneRequest,
|
||||||
) -> impl Stream<Item = ExecGuestRequest> {
|
) -> impl Stream<Item = ExecZoneRequest> {
|
||||||
let mut stdin = stdin();
|
let mut stdin = stdin();
|
||||||
stream! {
|
stream! {
|
||||||
yield initial;
|
yield initial;
|
||||||
@ -68,12 +68,12 @@ impl StdioConsoleStream {
|
|||||||
if size == 1 && buffer[0] == 0x1d {
|
if size == 1 && buffer[0] == 0x1d {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
yield ExecGuestRequest { guest_id: String::default(), task: None, data };
|
yield ExecZoneRequest { zone_id: String::default(), task: None, data };
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn stdout(mut stream: Streaming<ConsoleDataReply>) -> Result<()> {
|
pub async fn stdout(mut stream: Streaming<ZoneConsoleReply>) -> Result<()> {
|
||||||
if stdin().is_tty() {
|
if stdin().is_tty() {
|
||||||
enable_raw_mode()?;
|
enable_raw_mode()?;
|
||||||
StdioConsoleStream::register_terminal_restore_hook()?;
|
StdioConsoleStream::register_terminal_restore_hook()?;
|
||||||
@ -90,7 +90,7 @@ impl StdioConsoleStream {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn exec_output(mut stream: Streaming<ExecGuestReply>) -> Result<i32> {
|
pub async fn exec_output(mut stream: Streaming<ExecZoneReply>) -> Result<i32> {
|
||||||
let mut stdout = stdout();
|
let mut stdout = stdout();
|
||||||
let mut stderr = stderr();
|
let mut stderr = stderr();
|
||||||
while let Some(reply) = stream.next().await {
|
while let Some(reply) = stream.next().await {
|
||||||
@ -106,33 +106,33 @@ impl StdioConsoleStream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if reply.exited {
|
if reply.exited {
|
||||||
if reply.error.is_empty() {
|
return if reply.error.is_empty() {
|
||||||
return Ok(reply.exit_code);
|
Ok(reply.exit_code)
|
||||||
} else {
|
} else {
|
||||||
return Err(anyhow!("exec failed: {}", reply.error));
|
Err(anyhow!("exec failed: {}", reply.error))
|
||||||
}
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(-1)
|
Ok(-1)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn guest_exit_hook(
|
pub async fn zone_exit_hook(
|
||||||
id: String,
|
id: String,
|
||||||
events: EventStream,
|
events: EventStream,
|
||||||
) -> Result<JoinHandle<Option<i32>>> {
|
) -> Result<JoinHandle<Option<i32>>> {
|
||||||
Ok(tokio::task::spawn(async move {
|
Ok(tokio::task::spawn(async move {
|
||||||
let mut stream = events.subscribe();
|
let mut stream = events.subscribe();
|
||||||
while let Ok(event) = stream.recv().await {
|
while let Ok(event) = stream.recv().await {
|
||||||
let Event::GuestChanged(changed) = event;
|
let Event::ZoneChanged(changed) = event;
|
||||||
let Some(guest) = changed.guest else {
|
let Some(zone) = changed.zone else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(state) = guest.state else {
|
let Some(state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if guest.id != id {
|
if zone.id != id {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -141,7 +141,7 @@ impl StdioConsoleStream {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let status = state.status();
|
let status = state.status();
|
||||||
if status == GuestStatus::Destroying || status == GuestStatus::Destroyed {
|
if status == ZoneStatus::Destroying || status == ZoneStatus::Destroyed {
|
||||||
return Some(10);
|
return Some(10);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3,7 +3,7 @@ use std::{collections::HashMap, time::Duration};
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use fancy_duration::FancyDuration;
|
use fancy_duration::FancyDuration;
|
||||||
use human_bytes::human_bytes;
|
use human_bytes::human_bytes;
|
||||||
use krata::v1::common::{Guest, GuestMetricFormat, GuestMetricNode, GuestStatus};
|
use krata::v1::common::{Zone, ZoneMetricFormat, ZoneMetricNode, ZoneStatus};
|
||||||
use prost_reflect::{DynamicMessage, ReflectMessage};
|
use prost_reflect::{DynamicMessage, ReflectMessage};
|
||||||
use prost_types::Value;
|
use prost_types::Value;
|
||||||
use termtree::Tree;
|
use termtree::Tree;
|
||||||
@ -75,32 +75,31 @@ pub fn kv2line(map: HashMap<String, String>) -> String {
|
|||||||
.join(" ")
|
.join(" ")
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn guest_status_text(status: GuestStatus) -> String {
|
pub fn zone_status_text(status: ZoneStatus) -> String {
|
||||||
match status {
|
match status {
|
||||||
GuestStatus::Starting => "starting",
|
ZoneStatus::Starting => "starting",
|
||||||
GuestStatus::Started => "started",
|
ZoneStatus::Started => "started",
|
||||||
GuestStatus::Destroying => "destroying",
|
ZoneStatus::Destroying => "destroying",
|
||||||
GuestStatus::Destroyed => "destroyed",
|
ZoneStatus::Destroyed => "destroyed",
|
||||||
GuestStatus::Exited => "exited",
|
ZoneStatus::Exited => "exited",
|
||||||
GuestStatus::Failed => "failed",
|
ZoneStatus::Failed => "failed",
|
||||||
_ => "unknown",
|
_ => "unknown",
|
||||||
}
|
}
|
||||||
.to_string()
|
.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn guest_simple_line(guest: &Guest) -> String {
|
pub fn zone_simple_line(zone: &Zone) -> String {
|
||||||
let state = guest_status_text(
|
let state = zone_status_text(
|
||||||
guest
|
zone.state
|
||||||
.state
|
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|x| x.status())
|
.map(|x| x.status())
|
||||||
.unwrap_or(GuestStatus::Unknown),
|
.unwrap_or(ZoneStatus::Unknown),
|
||||||
);
|
);
|
||||||
let name = guest.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
|
let name = zone.spec.as_ref().map(|x| x.name.as_str()).unwrap_or("");
|
||||||
let network = guest.state.as_ref().and_then(|x| x.network.as_ref());
|
let network = zone.state.as_ref().and_then(|x| x.network.as_ref());
|
||||||
let ipv4 = network.map(|x| x.guest_ipv4.as_str()).unwrap_or("");
|
let ipv4 = network.map(|x| x.zone_ipv4.as_str()).unwrap_or("");
|
||||||
let ipv6 = network.map(|x| x.guest_ipv6.as_str()).unwrap_or("");
|
let ipv6 = network.map(|x| x.zone_ipv6.as_str()).unwrap_or("");
|
||||||
format!("{}\t{}\t{}\t{}\t{}", guest.id, state, name, ipv4, ipv6)
|
format!("{}\t{}\t{}\t{}\t{}", zone.id, state, name, ipv4, ipv6)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn metrics_value_string(value: Value) -> String {
|
fn metrics_value_string(value: Value) -> String {
|
||||||
@ -116,18 +115,18 @@ fn metrics_value_numeric(value: Value) -> f64 {
|
|||||||
string.parse::<f64>().ok().unwrap_or(f64::NAN)
|
string.parse::<f64>().ok().unwrap_or(f64::NAN)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn metrics_value_pretty(value: Value, format: GuestMetricFormat) -> String {
|
pub fn metrics_value_pretty(value: Value, format: ZoneMetricFormat) -> String {
|
||||||
match format {
|
match format {
|
||||||
GuestMetricFormat::Bytes => human_bytes(metrics_value_numeric(value)),
|
ZoneMetricFormat::Bytes => human_bytes(metrics_value_numeric(value)),
|
||||||
GuestMetricFormat::Integer => (metrics_value_numeric(value) as u64).to_string(),
|
ZoneMetricFormat::Integer => (metrics_value_numeric(value) as u64).to_string(),
|
||||||
GuestMetricFormat::DurationSeconds => {
|
ZoneMetricFormat::DurationSeconds => {
|
||||||
FancyDuration(Duration::from_secs_f64(metrics_value_numeric(value))).to_string()
|
FancyDuration(Duration::from_secs_f64(metrics_value_numeric(value))).to_string()
|
||||||
}
|
}
|
||||||
_ => metrics_value_string(value),
|
_ => metrics_value_string(value),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn metrics_flat_internal(prefix: &str, node: GuestMetricNode, map: &mut HashMap<String, String>) {
|
fn metrics_flat_internal(prefix: &str, node: ZoneMetricNode, map: &mut HashMap<String, String>) {
|
||||||
if let Some(value) = node.value {
|
if let Some(value) = node.value {
|
||||||
map.insert(prefix.to_string(), metrics_value_string(value));
|
map.insert(prefix.to_string(), metrics_value_string(value));
|
||||||
}
|
}
|
||||||
@ -142,13 +141,13 @@ fn metrics_flat_internal(prefix: &str, node: GuestMetricNode, map: &mut HashMap<
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn metrics_flat(root: GuestMetricNode) -> HashMap<String, String> {
|
pub fn metrics_flat(root: ZoneMetricNode) -> HashMap<String, String> {
|
||||||
let mut map = HashMap::new();
|
let mut map = HashMap::new();
|
||||||
metrics_flat_internal("", root, &mut map);
|
metrics_flat_internal("", root, &mut map);
|
||||||
map
|
map
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn metrics_tree(node: GuestMetricNode) -> Tree<String> {
|
pub fn metrics_tree(node: ZoneMetricNode) -> Tree<String> {
|
||||||
let mut name = node.name.to_string();
|
let mut name = node.name.to_string();
|
||||||
let format = node.format();
|
let format = node.format();
|
||||||
if let Some(value) = node.value {
|
if let Some(value) = node.value {
|
||||||
|
@ -2,10 +2,10 @@ use anyhow::Result;
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::{Guest, GuestMetricNode, GuestStatus},
|
common::{Zone, ZoneMetricNode, ZoneStatus},
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
ListGuestsRequest, ReadGuestMetricsRequest,
|
ListZonesRequest, ReadZoneMetricsRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -22,12 +22,12 @@ use tonic::transport::Channel;
|
|||||||
use crate::format::metrics_value_pretty;
|
use crate::format::metrics_value_pretty;
|
||||||
|
|
||||||
pub struct MetricState {
|
pub struct MetricState {
|
||||||
pub guest: Guest,
|
pub zone: Zone,
|
||||||
pub root: Option<GuestMetricNode>,
|
pub root: Option<ZoneMetricNode>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct MultiMetricState {
|
pub struct MultiMetricState {
|
||||||
pub guests: Vec<MetricState>,
|
pub zones: Vec<MetricState>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct MultiMetricCollector {
|
pub struct MultiMetricCollector {
|
||||||
@ -72,26 +72,26 @@ impl MultiMetricCollector {
|
|||||||
|
|
||||||
pub async fn process(&mut self, sender: Sender<MultiMetricState>) -> Result<()> {
|
pub async fn process(&mut self, sender: Sender<MultiMetricState>) -> Result<()> {
|
||||||
let mut events = self.events.subscribe();
|
let mut events = self.events.subscribe();
|
||||||
let mut guests: Vec<Guest> = self
|
let mut zones: Vec<Zone> = self
|
||||||
.client
|
.client
|
||||||
.list_guests(ListGuestsRequest {})
|
.list_zones(ListZonesRequest {})
|
||||||
.await?
|
.await?
|
||||||
.into_inner()
|
.into_inner()
|
||||||
.guests;
|
.zones;
|
||||||
loop {
|
loop {
|
||||||
let collect = select! {
|
let collect = select! {
|
||||||
x = events.recv() => match x {
|
x = events.recv() => match x {
|
||||||
Ok(event) => {
|
Ok(event) => {
|
||||||
let Event::GuestChanged(changed) = event;
|
let Event::ZoneChanged(changed) = event;
|
||||||
let Some(guest) = changed.guest else {
|
let Some(zone) = changed.zone else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
let Some(ref state) = guest.state else {
|
let Some(ref state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
guests.retain(|x| x.id != guest.id);
|
zones.retain(|x| x.id != zone.id);
|
||||||
if state.status() != GuestStatus::Destroying {
|
if state.status() != ZoneStatus::Destroying {
|
||||||
guests.push(guest);
|
zones.push(zone);
|
||||||
}
|
}
|
||||||
false
|
false
|
||||||
},
|
},
|
||||||
@ -111,19 +111,19 @@ impl MultiMetricCollector {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let mut metrics = Vec::new();
|
let mut metrics = Vec::new();
|
||||||
for guest in &guests {
|
for zone in &zones {
|
||||||
let Some(ref state) = guest.state else {
|
let Some(ref state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
if state.status() != GuestStatus::Started {
|
if state.status() != ZoneStatus::Started {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let root = timeout(
|
let root = timeout(
|
||||||
Duration::from_secs(5),
|
Duration::from_secs(5),
|
||||||
self.client.read_guest_metrics(ReadGuestMetricsRequest {
|
self.client.read_zone_metrics(ReadZoneMetricsRequest {
|
||||||
guest_id: guest.id.clone(),
|
zone_id: zone.id.clone(),
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@ -132,16 +132,16 @@ impl MultiMetricCollector {
|
|||||||
.map(|x| x.into_inner())
|
.map(|x| x.into_inner())
|
||||||
.and_then(|x| x.root);
|
.and_then(|x| x.root);
|
||||||
metrics.push(MetricState {
|
metrics.push(MetricState {
|
||||||
guest: guest.clone(),
|
zone: zone.clone(),
|
||||||
root,
|
root,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
sender.send(MultiMetricState { guests: metrics }).await?;
|
sender.send(MultiMetricState { zones: metrics }).await?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn lookup<'a>(node: &'a GuestMetricNode, path: &str) -> Option<&'a GuestMetricNode> {
|
pub fn lookup<'a>(node: &'a ZoneMetricNode, path: &str) -> Option<&'a ZoneMetricNode> {
|
||||||
let Some((what, b)) = path.split_once('/') else {
|
let Some((what, b)) = path.split_once('/') else {
|
||||||
return node.children.iter().find(|x| x.name == path);
|
return node.children.iter().find(|x| x.name == path);
|
||||||
};
|
};
|
||||||
@ -149,7 +149,7 @@ pub fn lookup<'a>(node: &'a GuestMetricNode, path: &str) -> Option<&'a GuestMetr
|
|||||||
return lookup(next, b);
|
return lookup(next, b);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn lookup_metric_value(node: &GuestMetricNode, path: &str) -> Option<String> {
|
pub fn lookup_metric_value(node: &ZoneMetricNode, path: &str) -> Option<String> {
|
||||||
lookup(node, path).and_then(|x| {
|
lookup(node, path).and_then(|x| {
|
||||||
x.value
|
x.value
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -13,7 +13,7 @@ use tokio::{
|
|||||||
};
|
};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::glt::GuestLookupTable;
|
use crate::zlt::ZoneLookupTable;
|
||||||
|
|
||||||
const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
|
const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
|
||||||
type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
|
type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
|
||||||
@ -24,7 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DaemonConsoleHandle {
|
pub struct DaemonConsoleHandle {
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
listeners: ListenerMap,
|
listeners: ListenerMap,
|
||||||
buffers: BufferMap,
|
buffers: BufferMap,
|
||||||
sender: Sender<(u32, Vec<u8>)>,
|
sender: Sender<(u32, Vec<u8>)>,
|
||||||
@ -84,7 +84,7 @@ impl Drop for DaemonConsoleHandle {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub struct DaemonConsole {
|
pub struct DaemonConsole {
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
listeners: ListenerMap,
|
listeners: ListenerMap,
|
||||||
buffers: BufferMap,
|
buffers: BufferMap,
|
||||||
receiver: Receiver<(u32, Option<Vec<u8>>)>,
|
receiver: Receiver<(u32, Option<Vec<u8>>)>,
|
||||||
@ -93,7 +93,7 @@ pub struct DaemonConsole {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DaemonConsole {
|
impl DaemonConsole {
|
||||||
pub async fn new(glt: GuestLookupTable) -> Result<DaemonConsole> {
|
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonConsole> {
|
||||||
let (service, sender, receiver) =
|
let (service, sender, receiver) =
|
||||||
ChannelService::new("krata-console".to_string(), Some(0)).await?;
|
ChannelService::new("krata-console".to_string(), Some(0)).await?;
|
||||||
let task = service.launch().await?;
|
let task = service.launch().await?;
|
||||||
|
@ -7,16 +7,16 @@ use krata::{
|
|||||||
ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
|
ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
|
||||||
},
|
},
|
||||||
v1::{
|
v1::{
|
||||||
common::{Guest, GuestState, GuestStatus, OciImageFormat},
|
common::{OciImageFormat, Zone, ZoneState, ZoneStatus},
|
||||||
control::{
|
control::{
|
||||||
control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
|
control_service_server::ControlService, CreateZoneReply, CreateZoneRequest,
|
||||||
CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
|
DestroyZoneReply, DestroyZoneRequest, DeviceInfo, ExecZoneReply, ExecZoneRequest,
|
||||||
DeviceInfo, ExecGuestReply, ExecGuestRequest, HostCpuTopologyInfo,
|
HostCpuTopologyInfo, HostCpuTopologyReply, HostCpuTopologyRequest,
|
||||||
HostCpuTopologyReply, HostCpuTopologyRequest, HostPowerManagementPolicy,
|
HostPowerManagementPolicy, IdentifyHostReply, IdentifyHostRequest, ListDevicesReply,
|
||||||
IdentifyHostReply, IdentifyHostRequest, ListDevicesReply, ListDevicesRequest,
|
ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
|
||||||
ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
|
ReadZoneMetricsReply, ReadZoneMetricsRequest, ResolveZoneReply, ResolveZoneRequest,
|
||||||
ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
|
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest, ZoneConsoleReply,
|
||||||
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
|
ZoneConsoleRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -37,9 +37,9 @@ use tonic::{Request, Response, Status, Streaming};
|
|||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
command::DaemonCommand, console::DaemonConsoleHandle, db::GuestStore,
|
command::DaemonCommand, console::DaemonConsoleHandle, db::ZoneStore,
|
||||||
devices::DaemonDeviceManager, event::DaemonEventContext, glt::GuestLookupTable,
|
devices::DaemonDeviceManager, event::DaemonEventContext, idm::DaemonIdmHandle,
|
||||||
idm::DaemonIdmHandle, metrics::idm_metric_to_api, oci::convert_oci_progress,
|
metrics::idm_metric_to_api, oci::convert_oci_progress, zlt::ZoneLookupTable,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub struct ApiError {
|
pub struct ApiError {
|
||||||
@ -62,13 +62,13 @@ impl From<ApiError> for Status {
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DaemonControlService {
|
pub struct DaemonControlService {
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
console: DaemonConsoleHandle,
|
console: DaemonConsoleHandle,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
packer: OciPackerService,
|
packer: OciPackerService,
|
||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
}
|
}
|
||||||
@ -76,13 +76,13 @@ pub struct DaemonControlService {
|
|||||||
impl DaemonControlService {
|
impl DaemonControlService {
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn new(
|
pub fn new(
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
console: DaemonConsoleHandle,
|
console: DaemonConsoleHandle,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
packer: OciPackerService,
|
packer: OciPackerService,
|
||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
@ -92,8 +92,8 @@ impl DaemonControlService {
|
|||||||
events,
|
events,
|
||||||
console,
|
console,
|
||||||
idm,
|
idm,
|
||||||
guests,
|
zones,
|
||||||
guest_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
packer,
|
packer,
|
||||||
runtime,
|
runtime,
|
||||||
}
|
}
|
||||||
@ -102,7 +102,7 @@ impl DaemonControlService {
|
|||||||
|
|
||||||
enum ConsoleDataSelect {
|
enum ConsoleDataSelect {
|
||||||
Read(Option<Vec<u8>>),
|
Read(Option<Vec<u8>>),
|
||||||
Write(Option<Result<ConsoleDataRequest, tonic::Status>>),
|
Write(Option<Result<ZoneConsoleRequest, Status>>),
|
||||||
}
|
}
|
||||||
|
|
||||||
enum PullImageSelect {
|
enum PullImageSelect {
|
||||||
@ -112,11 +112,11 @@ enum PullImageSelect {
|
|||||||
|
|
||||||
#[tonic::async_trait]
|
#[tonic::async_trait]
|
||||||
impl ControlService for DaemonControlService {
|
impl ControlService for DaemonControlService {
|
||||||
type ExecGuestStream =
|
type ExecZoneStream =
|
||||||
Pin<Box<dyn Stream<Item = Result<ExecGuestReply, Status>> + Send + 'static>>;
|
Pin<Box<dyn Stream<Item = Result<ExecZoneReply, Status>> + Send + 'static>>;
|
||||||
|
|
||||||
type ConsoleDataStream =
|
type AttachZoneConsoleStream =
|
||||||
Pin<Box<dyn Stream<Item = Result<ConsoleDataReply, Status>> + Send + 'static>>;
|
Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;
|
||||||
|
|
||||||
type PullImageStream =
|
type PullImageStream =
|
||||||
Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
|
Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
|
||||||
@ -139,25 +139,25 @@ impl ControlService for DaemonControlService {
|
|||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn create_guest(
|
async fn create_zone(
|
||||||
&self,
|
&self,
|
||||||
request: Request<CreateGuestRequest>,
|
request: Request<CreateZoneRequest>,
|
||||||
) -> Result<Response<CreateGuestReply>, Status> {
|
) -> Result<Response<CreateZoneReply>, Status> {
|
||||||
let request = request.into_inner();
|
let request = request.into_inner();
|
||||||
let Some(spec) = request.spec else {
|
let Some(spec) = request.spec else {
|
||||||
return Err(ApiError {
|
return Err(ApiError {
|
||||||
message: "guest spec not provided".to_string(),
|
message: "zone spec not provided".to_string(),
|
||||||
}
|
}
|
||||||
.into());
|
.into());
|
||||||
};
|
};
|
||||||
let uuid = Uuid::new_v4();
|
let uuid = Uuid::new_v4();
|
||||||
self.guests
|
self.zones
|
||||||
.update(
|
.update(
|
||||||
uuid,
|
uuid,
|
||||||
Guest {
|
Zone {
|
||||||
id: uuid.to_string(),
|
id: uuid.to_string(),
|
||||||
state: Some(GuestState {
|
state: Some(ZoneState {
|
||||||
status: GuestStatus::Starting.into(),
|
status: ZoneStatus::Starting.into(),
|
||||||
network: None,
|
network: None,
|
||||||
exit_info: None,
|
exit_info: None,
|
||||||
error_info: None,
|
error_info: None,
|
||||||
@ -169,21 +169,21 @@ impl ControlService for DaemonControlService {
|
|||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.map_err(ApiError::from)?;
|
.map_err(ApiError::from)?;
|
||||||
self.guest_reconciler_notify
|
self.zone_reconciler_notify
|
||||||
.send(uuid)
|
.send(uuid)
|
||||||
.await
|
.await
|
||||||
.map_err(|x| ApiError {
|
.map_err(|x| ApiError {
|
||||||
message: x.to_string(),
|
message: x.to_string(),
|
||||||
})?;
|
})?;
|
||||||
Ok(Response::new(CreateGuestReply {
|
Ok(Response::new(CreateZoneReply {
|
||||||
guest_id: uuid.to_string(),
|
zone_id: uuid.to_string(),
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn exec_guest(
|
async fn exec_zone(
|
||||||
&self,
|
&self,
|
||||||
request: Request<Streaming<ExecGuestRequest>>,
|
request: Request<Streaming<ExecZoneRequest>>,
|
||||||
) -> Result<Response<Self::ExecGuestStream>, Status> {
|
) -> Result<Response<Self::ExecZoneStream>, Status> {
|
||||||
let mut input = request.into_inner();
|
let mut input = request.into_inner();
|
||||||
let Some(request) = input.next().await else {
|
let Some(request) = input.next().await else {
|
||||||
return Err(ApiError {
|
return Err(ApiError {
|
||||||
@ -200,7 +200,7 @@ impl ControlService for DaemonControlService {
|
|||||||
.into());
|
.into());
|
||||||
};
|
};
|
||||||
|
|
||||||
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
|
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
})?;
|
})?;
|
||||||
let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
|
let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
|
||||||
@ -232,7 +232,7 @@ impl ControlService for DaemonControlService {
|
|||||||
loop {
|
loop {
|
||||||
select! {
|
select! {
|
||||||
x = input.next() => if let Some(update) = x {
|
x = input.next() => if let Some(update) = x {
|
||||||
let update: Result<ExecGuestRequest, Status> = update.map_err(|error| ApiError {
|
let update: Result<ExecZoneRequest, Status> = update.map_err(|error| ApiError {
|
||||||
message: error.to_string()
|
message: error.to_string()
|
||||||
}.into());
|
}.into());
|
||||||
|
|
||||||
@ -252,7 +252,7 @@ impl ControlService for DaemonControlService {
|
|||||||
let Some(IdmResponseType::ExecStream(update)) = response.response else {
|
let Some(IdmResponseType::ExecStream(update)) = response.response else {
|
||||||
break;
|
break;
|
||||||
};
|
};
|
||||||
let reply = ExecGuestReply {
|
let reply = ExecZoneReply {
|
||||||
exited: update.exited,
|
exited: update.exited,
|
||||||
error: update.error,
|
error: update.error,
|
||||||
exit_code: update.exit_code,
|
exit_code: update.exit_code,
|
||||||
@ -269,80 +269,80 @@ impl ControlService for DaemonControlService {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Response::new(Box::pin(output) as Self::ExecGuestStream))
|
Ok(Response::new(Box::pin(output) as Self::ExecZoneStream))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn destroy_guest(
|
async fn destroy_zone(
|
||||||
&self,
|
&self,
|
||||||
request: Request<DestroyGuestRequest>,
|
request: Request<DestroyZoneRequest>,
|
||||||
) -> Result<Response<DestroyGuestReply>, Status> {
|
) -> Result<Response<DestroyZoneReply>, Status> {
|
||||||
let request = request.into_inner();
|
let request = request.into_inner();
|
||||||
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
|
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
})?;
|
})?;
|
||||||
let Some(mut guest) = self.guests.read(uuid).await.map_err(ApiError::from)? else {
|
let Some(mut zone) = self.zones.read(uuid).await.map_err(ApiError::from)? else {
|
||||||
return Err(ApiError {
|
return Err(ApiError {
|
||||||
message: "guest not found".to_string(),
|
message: "zone not found".to_string(),
|
||||||
}
|
}
|
||||||
.into());
|
.into());
|
||||||
};
|
};
|
||||||
|
|
||||||
guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
|
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
|
||||||
|
|
||||||
if guest.state.as_ref().unwrap().status() == GuestStatus::Destroyed {
|
if zone.state.as_ref().unwrap().status() == ZoneStatus::Destroyed {
|
||||||
return Err(ApiError {
|
return Err(ApiError {
|
||||||
message: "guest already destroyed".to_string(),
|
message: "zone already destroyed".to_string(),
|
||||||
}
|
}
|
||||||
.into());
|
.into());
|
||||||
}
|
}
|
||||||
|
|
||||||
guest.state.as_mut().unwrap().status = GuestStatus::Destroying.into();
|
zone.state.as_mut().unwrap().status = ZoneStatus::Destroying.into();
|
||||||
self.guests
|
self.zones
|
||||||
.update(uuid, guest)
|
.update(uuid, zone)
|
||||||
.await
|
.await
|
||||||
.map_err(ApiError::from)?;
|
.map_err(ApiError::from)?;
|
||||||
self.guest_reconciler_notify
|
self.zone_reconciler_notify
|
||||||
.send(uuid)
|
.send(uuid)
|
||||||
.await
|
.await
|
||||||
.map_err(|x| ApiError {
|
.map_err(|x| ApiError {
|
||||||
message: x.to_string(),
|
message: x.to_string(),
|
||||||
})?;
|
})?;
|
||||||
Ok(Response::new(DestroyGuestReply {}))
|
Ok(Response::new(DestroyZoneReply {}))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn list_guests(
|
async fn list_zones(
|
||||||
&self,
|
&self,
|
||||||
request: Request<ListGuestsRequest>,
|
request: Request<ListZonesRequest>,
|
||||||
) -> Result<Response<ListGuestsReply>, Status> {
|
) -> Result<Response<ListZonesReply>, Status> {
|
||||||
let _ = request.into_inner();
|
let _ = request.into_inner();
|
||||||
let guests = self.guests.list().await.map_err(ApiError::from)?;
|
let zones = self.zones.list().await.map_err(ApiError::from)?;
|
||||||
let guests = guests.into_values().collect::<Vec<Guest>>();
|
let zones = zones.into_values().collect::<Vec<Zone>>();
|
||||||
Ok(Response::new(ListGuestsReply { guests }))
|
Ok(Response::new(ListZonesReply { zones }))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn resolve_guest(
|
async fn resolve_zone(
|
||||||
&self,
|
&self,
|
||||||
request: Request<ResolveGuestRequest>,
|
request: Request<ResolveZoneRequest>,
|
||||||
) -> Result<Response<ResolveGuestReply>, Status> {
|
) -> Result<Response<ResolveZoneReply>, Status> {
|
||||||
let request = request.into_inner();
|
let request = request.into_inner();
|
||||||
let guests = self.guests.list().await.map_err(ApiError::from)?;
|
let zones = self.zones.list().await.map_err(ApiError::from)?;
|
||||||
let guests = guests
|
let zones = zones
|
||||||
.into_values()
|
.into_values()
|
||||||
.filter(|x| {
|
.filter(|x| {
|
||||||
let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
|
let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
|
||||||
(!request.name.is_empty() && comparison_spec.name == request.name)
|
(!request.name.is_empty() && comparison_spec.name == request.name)
|
||||||
|| x.id == request.name
|
|| x.id == request.name
|
||||||
})
|
})
|
||||||
.collect::<Vec<Guest>>();
|
.collect::<Vec<Zone>>();
|
||||||
Ok(Response::new(ResolveGuestReply {
|
Ok(Response::new(ResolveZoneReply {
|
||||||
guest: guests.first().cloned(),
|
zone: zones.first().cloned(),
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn console_data(
|
async fn attach_zone_console(
|
||||||
&self,
|
&self,
|
||||||
request: Request<Streaming<ConsoleDataRequest>>,
|
request: Request<Streaming<ZoneConsoleRequest>>,
|
||||||
) -> Result<Response<Self::ConsoleDataStream>, Status> {
|
) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
|
||||||
let mut input = request.into_inner();
|
let mut input = request.into_inner();
|
||||||
let Some(request) = input.next().await else {
|
let Some(request) = input.next().await else {
|
||||||
return Err(ApiError {
|
return Err(ApiError {
|
||||||
@ -351,7 +351,7 @@ impl ControlService for DaemonControlService {
|
|||||||
.into());
|
.into());
|
||||||
};
|
};
|
||||||
let request = request?;
|
let request = request?;
|
||||||
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
|
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
})?;
|
})?;
|
||||||
let (sender, mut receiver) = channel(100);
|
let (sender, mut receiver) = channel(100);
|
||||||
@ -364,7 +364,7 @@ impl ControlService for DaemonControlService {
|
|||||||
})?;
|
})?;
|
||||||
|
|
||||||
let output = try_stream! {
|
let output = try_stream! {
|
||||||
yield ConsoleDataReply { data: console.initial.clone(), };
|
yield ZoneConsoleReply { data: console.initial.clone(), };
|
||||||
loop {
|
loop {
|
||||||
let what = select! {
|
let what = select! {
|
||||||
x = receiver.recv() => ConsoleDataSelect::Read(x),
|
x = receiver.recv() => ConsoleDataSelect::Read(x),
|
||||||
@ -373,7 +373,7 @@ impl ControlService for DaemonControlService {
|
|||||||
|
|
||||||
match what {
|
match what {
|
||||||
ConsoleDataSelect::Read(Some(data)) => {
|
ConsoleDataSelect::Read(Some(data)) => {
|
||||||
yield ConsoleDataReply { data, };
|
yield ZoneConsoleReply { data, };
|
||||||
},
|
},
|
||||||
|
|
||||||
ConsoleDataSelect::Read(None) => {
|
ConsoleDataSelect::Read(None) => {
|
||||||
@ -396,15 +396,17 @@ impl ControlService for DaemonControlService {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Response::new(Box::pin(output) as Self::ConsoleDataStream))
|
Ok(Response::new(
|
||||||
|
Box::pin(output) as Self::AttachZoneConsoleStream
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn read_guest_metrics(
|
async fn read_zone_metrics(
|
||||||
&self,
|
&self,
|
||||||
request: Request<ReadGuestMetricsRequest>,
|
request: Request<ReadZoneMetricsRequest>,
|
||||||
) -> Result<Response<ReadGuestMetricsReply>, Status> {
|
) -> Result<Response<ReadZoneMetricsReply>, Status> {
|
||||||
let request = request.into_inner();
|
let request = request.into_inner();
|
||||||
let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
|
let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
})?;
|
})?;
|
||||||
let client = self.idm.client(uuid).await.map_err(|error| ApiError {
|
let client = self.idm.client(uuid).await.map_err(|error| ApiError {
|
||||||
@ -420,7 +422,7 @@ impl ControlService for DaemonControlService {
|
|||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let mut reply = ReadGuestMetricsReply::default();
|
let mut reply = ReadZoneMetricsReply::default();
|
||||||
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
|
if let Some(IdmResponseType::Metrics(metrics)) = response.response {
|
||||||
reply.root = metrics.root.map(idm_metric_to_api);
|
reply.root = metrics.root.map(idm_metric_to_api);
|
||||||
}
|
}
|
||||||
|
@ -1,66 +1,66 @@
|
|||||||
use std::{collections::HashMap, path::Path, sync::Arc};
|
use std::{collections::HashMap, path::Path, sync::Arc};
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use krata::v1::common::Guest;
|
use krata::v1::common::Zone;
|
||||||
use log::error;
|
use log::error;
|
||||||
use prost::Message;
|
use prost::Message;
|
||||||
use redb::{Database, ReadableTable, TableDefinition};
|
use redb::{Database, ReadableTable, TableDefinition};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
const GUESTS: TableDefinition<u128, &[u8]> = TableDefinition::new("guests");
|
const ZONES: TableDefinition<u128, &[u8]> = TableDefinition::new("zones");
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct GuestStore {
|
pub struct ZoneStore {
|
||||||
database: Arc<Database>,
|
database: Arc<Database>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestStore {
|
impl ZoneStore {
|
||||||
pub fn open(path: &Path) -> Result<Self> {
|
pub fn open(path: &Path) -> Result<Self> {
|
||||||
let database = Database::create(path)?;
|
let database = Database::create(path)?;
|
||||||
let write = database.begin_write()?;
|
let write = database.begin_write()?;
|
||||||
let _ = write.open_table(GUESTS);
|
let _ = write.open_table(ZONES);
|
||||||
write.commit()?;
|
write.commit()?;
|
||||||
Ok(GuestStore {
|
Ok(ZoneStore {
|
||||||
database: Arc::new(database),
|
database: Arc::new(database),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn read(&self, id: Uuid) -> Result<Option<Guest>> {
|
pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
|
||||||
let read = self.database.begin_read()?;
|
let read = self.database.begin_read()?;
|
||||||
let table = read.open_table(GUESTS)?;
|
let table = read.open_table(ZONES)?;
|
||||||
let Some(entry) = table.get(id.to_u128_le())? else {
|
let Some(entry) = table.get(id.to_u128_le())? else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
};
|
};
|
||||||
let bytes = entry.value();
|
let bytes = entry.value();
|
||||||
Ok(Some(Guest::decode(bytes)?))
|
Ok(Some(Zone::decode(bytes)?))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn list(&self) -> Result<HashMap<Uuid, Guest>> {
|
pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
|
||||||
let mut guests: HashMap<Uuid, Guest> = HashMap::new();
|
let mut zones: HashMap<Uuid, Zone> = HashMap::new();
|
||||||
let read = self.database.begin_read()?;
|
let read = self.database.begin_read()?;
|
||||||
let table = read.open_table(GUESTS)?;
|
let table = read.open_table(ZONES)?;
|
||||||
for result in table.iter()? {
|
for result in table.iter()? {
|
||||||
let (key, value) = result?;
|
let (key, value) = result?;
|
||||||
let uuid = Uuid::from_u128_le(key.value());
|
let uuid = Uuid::from_u128_le(key.value());
|
||||||
let state = match Guest::decode(value.value()) {
|
let state = match Zone::decode(value.value()) {
|
||||||
Ok(state) => state,
|
Ok(state) => state,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
error!(
|
error!(
|
||||||
"found invalid guest state in database for uuid {}: {}",
|
"found invalid zone state in database for uuid {}: {}",
|
||||||
uuid, error
|
uuid, error
|
||||||
);
|
);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
guests.insert(uuid, state);
|
zones.insert(uuid, state);
|
||||||
}
|
}
|
||||||
Ok(guests)
|
Ok(zones)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn update(&self, id: Uuid, entry: Guest) -> Result<()> {
|
pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
|
||||||
let write = self.database.begin_write()?;
|
let write = self.database.begin_write()?;
|
||||||
{
|
{
|
||||||
let mut table = write.open_table(GUESTS)?;
|
let mut table = write.open_table(ZONES)?;
|
||||||
let bytes = entry.encode_to_vec();
|
let bytes = entry.encode_to_vec();
|
||||||
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
||||||
}
|
}
|
||||||
@ -71,7 +71,7 @@ impl GuestStore {
|
|||||||
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
||||||
let write = self.database.begin_write()?;
|
let write = self.database.begin_write()?;
|
||||||
{
|
{
|
||||||
let mut table = write.open_table(GUESTS)?;
|
let mut table = write.open_table(ZONES)?;
|
||||||
table.remove(id.to_u128_le())?;
|
table.remove(id.to_u128_le())?;
|
||||||
}
|
}
|
||||||
write.commit()?;
|
write.commit()?;
|
||||||
|
@ -31,7 +31,7 @@ impl DaemonDeviceManager {
|
|||||||
let mut devices = self.devices.write().await;
|
let mut devices = self.devices.write().await;
|
||||||
let Some(state) = devices.get_mut(device) else {
|
let Some(state) = devices.get_mut(device) else {
|
||||||
return Err(anyhow!(
|
return Err(anyhow!(
|
||||||
"unable to claim unknown device '{}' for guest {}",
|
"unable to claim unknown device '{}' for zone {}",
|
||||||
device,
|
device,
|
||||||
uuid
|
uuid
|
||||||
));
|
));
|
||||||
@ -39,7 +39,7 @@ impl DaemonDeviceManager {
|
|||||||
|
|
||||||
if let Some(owner) = state.owner {
|
if let Some(owner) = state.owner {
|
||||||
return Err(anyhow!(
|
return Err(anyhow!(
|
||||||
"unable to claim device '{}' for guest {}: already claimed by {}",
|
"unable to claim device '{}' for zone {}: already claimed by {}",
|
||||||
device,
|
device,
|
||||||
uuid,
|
uuid,
|
||||||
owner
|
owner
|
||||||
@ -92,7 +92,7 @@ impl DaemonDeviceManager {
|
|||||||
|
|
||||||
for (name, uuid) in &claims {
|
for (name, uuid) in &claims {
|
||||||
if !devices.contains_key(name) {
|
if !devices.contains_key(name) {
|
||||||
warn!("unknown device '{}' assigned to guest {}", name, uuid);
|
warn!("unknown device '{}' assigned to zone {}", name, uuid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4,10 +4,12 @@ use std::{
|
|||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use crate::{db::ZoneStore, idm::DaemonIdmHandle};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
use krata::v1::common::ZoneExitInfo;
|
||||||
use krata::{
|
use krata::{
|
||||||
idm::{internal::event::Event as EventType, internal::Event},
|
idm::{internal::event::Event as EventType, internal::Event},
|
||||||
v1::common::{GuestExitInfo, GuestState, GuestStatus},
|
v1::common::{ZoneState, ZoneStatus},
|
||||||
};
|
};
|
||||||
use log::{error, warn};
|
use log::{error, warn};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
@ -21,8 +23,6 @@ use tokio::{
|
|||||||
};
|
};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::{db::GuestStore, idm::DaemonIdmHandle};
|
|
||||||
|
|
||||||
pub type DaemonEvent = krata::v1::control::watch_events_reply::Event;
|
pub type DaemonEvent = krata::v1::control::watch_events_reply::Event;
|
||||||
|
|
||||||
const EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
|
const EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
|
||||||
@ -45,8 +45,8 @@ impl DaemonEventContext {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub struct DaemonEventGenerator {
|
pub struct DaemonEventGenerator {
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
feed: broadcast::Receiver<DaemonEvent>,
|
feed: broadcast::Receiver<DaemonEvent>,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
|
idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
|
||||||
@ -57,15 +57,15 @@ pub struct DaemonEventGenerator {
|
|||||||
|
|
||||||
impl DaemonEventGenerator {
|
impl DaemonEventGenerator {
|
||||||
pub async fn new(
|
pub async fn new(
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
) -> Result<(DaemonEventContext, DaemonEventGenerator)> {
|
) -> Result<(DaemonEventContext, DaemonEventGenerator)> {
|
||||||
let (sender, _) = broadcast::channel(EVENT_CHANNEL_QUEUE_LEN);
|
let (sender, _) = broadcast::channel(EVENT_CHANNEL_QUEUE_LEN);
|
||||||
let (idm_sender, idm_receiver) = channel(IDM_EVENT_CHANNEL_QUEUE_LEN);
|
let (idm_sender, idm_receiver) = channel(IDM_EVENT_CHANNEL_QUEUE_LEN);
|
||||||
let generator = DaemonEventGenerator {
|
let generator = DaemonEventGenerator {
|
||||||
guests,
|
zones,
|
||||||
guest_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
feed: sender.subscribe(),
|
feed: sender.subscribe(),
|
||||||
idm,
|
idm,
|
||||||
idms: HashMap::new(),
|
idms: HashMap::new(),
|
||||||
@ -78,20 +78,20 @@ impl DaemonEventGenerator {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_feed_event(&mut self, event: &DaemonEvent) -> Result<()> {
|
async fn handle_feed_event(&mut self, event: &DaemonEvent) -> Result<()> {
|
||||||
let DaemonEvent::GuestChanged(changed) = event;
|
let DaemonEvent::ZoneChanged(changed) = event;
|
||||||
let Some(ref guest) = changed.guest else {
|
let Some(ref zone) = changed.zone else {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(ref state) = guest.state else {
|
let Some(ref state) = zone.state else {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
let status = state.status();
|
let status = state.status();
|
||||||
let id = Uuid::from_str(&guest.id)?;
|
let id = Uuid::from_str(&zone.id)?;
|
||||||
let domid = state.domid;
|
let domid = state.domid;
|
||||||
match status {
|
match status {
|
||||||
GuestStatus::Started => {
|
ZoneStatus::Started => {
|
||||||
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
||||||
let client = self.idm.client_by_domid(domid).await?;
|
let client = self.idm.client_by_domid(domid).await?;
|
||||||
let mut receiver = client.subscribe().await?;
|
let mut receiver = client.subscribe().await?;
|
||||||
@ -111,7 +111,7 @@ impl DaemonEventGenerator {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
GuestStatus::Destroyed => {
|
ZoneStatus::Destroyed => {
|
||||||
if let Some((_, handle)) = self.idms.remove(&domid) {
|
if let Some((_, handle)) = self.idms.remove(&domid) {
|
||||||
handle.abort();
|
handle.abort();
|
||||||
}
|
}
|
||||||
@ -130,18 +130,18 @@ impl DaemonEventGenerator {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
||||||
if let Some(mut guest) = self.guests.read(id).await? {
|
if let Some(mut zone) = self.zones.read(id).await? {
|
||||||
guest.state = Some(GuestState {
|
zone.state = Some(ZoneState {
|
||||||
status: GuestStatus::Exited.into(),
|
status: ZoneStatus::Exited.into(),
|
||||||
network: guest.state.clone().unwrap_or_default().network,
|
network: zone.state.clone().unwrap_or_default().network,
|
||||||
exit_info: Some(GuestExitInfo { code }),
|
exit_info: Some(ZoneExitInfo { code }),
|
||||||
error_info: None,
|
error_info: None,
|
||||||
host: guest.state.clone().map(|x| x.host).unwrap_or_default(),
|
host: zone.state.clone().map(|x| x.host).unwrap_or_default(),
|
||||||
domid: guest.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
domid: zone.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
||||||
});
|
});
|
||||||
|
|
||||||
self.guests.update(id, guest).await?;
|
self.zones.update(id, zone).await?;
|
||||||
self.guest_reconciler_notify.send(id).await?;
|
self.zone_reconciler_notify.send(id).await?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -24,14 +24,14 @@ use tokio::{
|
|||||||
};
|
};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::glt::GuestLookupTable;
|
use crate::zlt::ZoneLookupTable;
|
||||||
|
|
||||||
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
|
type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
|
||||||
type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
|
type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DaemonIdmHandle {
|
pub struct DaemonIdmHandle {
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
clients: ClientMap,
|
clients: ClientMap,
|
||||||
feeds: BackendFeedMap,
|
feeds: BackendFeedMap,
|
||||||
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
||||||
@ -72,7 +72,7 @@ pub struct DaemonIdmSnoopPacket {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub struct DaemonIdm {
|
pub struct DaemonIdm {
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
clients: ClientMap,
|
clients: ClientMap,
|
||||||
feeds: BackendFeedMap,
|
feeds: BackendFeedMap,
|
||||||
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
tx_sender: Sender<(u32, IdmTransportPacket)>,
|
||||||
@ -84,7 +84,7 @@ pub struct DaemonIdm {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl DaemonIdm {
|
impl DaemonIdm {
|
||||||
pub async fn new(glt: GuestLookupTable) -> Result<DaemonIdm> {
|
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonIdm> {
|
||||||
let (service, tx_raw_sender, rx_receiver) =
|
let (service, tx_raw_sender, rx_receiver) =
|
||||||
ChannelService::new("krata-channel".to_string(), None).await?;
|
ChannelService::new("krata-channel".to_string(), None).await?;
|
||||||
let (tx_sender, tx_receiver) = channel(100);
|
let (tx_sender, tx_receiver) = channel(100);
|
||||||
|
@ -4,16 +4,15 @@ use anyhow::{anyhow, Result};
|
|||||||
use config::DaemonConfig;
|
use config::DaemonConfig;
|
||||||
use console::{DaemonConsole, DaemonConsoleHandle};
|
use console::{DaemonConsole, DaemonConsoleHandle};
|
||||||
use control::DaemonControlService;
|
use control::DaemonControlService;
|
||||||
use db::GuestStore;
|
use db::ZoneStore;
|
||||||
use devices::DaemonDeviceManager;
|
use devices::DaemonDeviceManager;
|
||||||
use event::{DaemonEventContext, DaemonEventGenerator};
|
use event::{DaemonEventContext, DaemonEventGenerator};
|
||||||
use glt::GuestLookupTable;
|
|
||||||
use idm::{DaemonIdm, DaemonIdmHandle};
|
use idm::{DaemonIdm, DaemonIdmHandle};
|
||||||
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
||||||
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
||||||
use kratart::Runtime;
|
use kratart::Runtime;
|
||||||
use log::info;
|
use log::info;
|
||||||
use reconcile::guest::GuestReconciler;
|
use reconcile::zone::ZoneReconciler;
|
||||||
use tokio::{
|
use tokio::{
|
||||||
fs,
|
fs,
|
||||||
net::UnixListener,
|
net::UnixListener,
|
||||||
@ -23,6 +22,7 @@ use tokio::{
|
|||||||
use tokio_stream::wrappers::UnixListenerStream;
|
use tokio_stream::wrappers::UnixListenerStream;
|
||||||
use tonic::transport::{Identity, Server, ServerTlsConfig};
|
use tonic::transport::{Identity, Server, ServerTlsConfig};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
use zlt::ZoneLookupTable;
|
||||||
|
|
||||||
pub mod command;
|
pub mod command;
|
||||||
pub mod config;
|
pub mod config;
|
||||||
@ -31,21 +31,21 @@ pub mod control;
|
|||||||
pub mod db;
|
pub mod db;
|
||||||
pub mod devices;
|
pub mod devices;
|
||||||
pub mod event;
|
pub mod event;
|
||||||
pub mod glt;
|
|
||||||
pub mod idm;
|
pub mod idm;
|
||||||
pub mod metrics;
|
pub mod metrics;
|
||||||
pub mod oci;
|
pub mod oci;
|
||||||
pub mod reconcile;
|
pub mod reconcile;
|
||||||
|
pub mod zlt;
|
||||||
|
|
||||||
pub struct Daemon {
|
pub struct Daemon {
|
||||||
store: String,
|
store: String,
|
||||||
_config: Arc<DaemonConfig>,
|
_config: Arc<DaemonConfig>,
|
||||||
glt: GuestLookupTable,
|
glt: ZoneLookupTable,
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
guest_reconciler_task: JoinHandle<()>,
|
zone_reconciler_task: JoinHandle<()>,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
generator_task: JoinHandle<()>,
|
generator_task: JoinHandle<()>,
|
||||||
idm: DaemonIdmHandle,
|
idm: DaemonIdmHandle,
|
||||||
console: DaemonConsoleHandle,
|
console: DaemonConsoleHandle,
|
||||||
@ -53,7 +53,7 @@ pub struct Daemon {
|
|||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
}
|
}
|
||||||
|
|
||||||
const GUEST_RECONCILER_QUEUE_LEN: usize = 1000;
|
const ZONE_RECONCILER_QUEUE_LEN: usize = 1000;
|
||||||
|
|
||||||
impl Daemon {
|
impl Daemon {
|
||||||
pub async fn new(store: String) -> Result<Self> {
|
pub async fn new(store: String) -> Result<Self> {
|
||||||
@ -89,40 +89,40 @@ impl Daemon {
|
|||||||
generated
|
generated
|
||||||
};
|
};
|
||||||
|
|
||||||
let initrd_path = detect_guest_path(&store, "initrd")?;
|
let initrd_path = detect_zone_path(&store, "initrd")?;
|
||||||
let kernel_path = detect_guest_path(&store, "kernel")?;
|
let kernel_path = detect_zone_path(&store, "kernel")?;
|
||||||
let addons_path = detect_guest_path(&store, "addons.squashfs")?;
|
let addons_path = detect_zone_path(&store, "addons.squashfs")?;
|
||||||
|
|
||||||
let seed = config.oci.seed.clone().map(PathBuf::from);
|
let seed = config.oci.seed.clone().map(PathBuf::from);
|
||||||
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
|
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
|
||||||
let runtime = Runtime::new(host_uuid).await?;
|
let runtime = Runtime::new(host_uuid).await?;
|
||||||
let glt = GuestLookupTable::new(0, host_uuid);
|
let glt = ZoneLookupTable::new(0, host_uuid);
|
||||||
let guests_db_path = format!("{}/guests.db", store);
|
let zones_db_path = format!("{}/zones.db", store);
|
||||||
let guests = GuestStore::open(&PathBuf::from(guests_db_path))?;
|
let zones = ZoneStore::open(&PathBuf::from(zones_db_path))?;
|
||||||
let (guest_reconciler_notify, guest_reconciler_receiver) =
|
let (zone_reconciler_notify, zone_reconciler_receiver) =
|
||||||
channel::<Uuid>(GUEST_RECONCILER_QUEUE_LEN);
|
channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
|
||||||
let idm = DaemonIdm::new(glt.clone()).await?;
|
let idm = DaemonIdm::new(glt.clone()).await?;
|
||||||
let idm = idm.launch().await?;
|
let idm = idm.launch().await?;
|
||||||
let console = DaemonConsole::new(glt.clone()).await?;
|
let console = DaemonConsole::new(glt.clone()).await?;
|
||||||
let console = console.launch().await?;
|
let console = console.launch().await?;
|
||||||
let (events, generator) =
|
let (events, generator) =
|
||||||
DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
|
DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
|
||||||
.await?;
|
.await?;
|
||||||
let runtime_for_reconciler = runtime.dupe().await?;
|
let runtime_for_reconciler = runtime.dupe().await?;
|
||||||
let guest_reconciler = GuestReconciler::new(
|
let zone_reconciler = ZoneReconciler::new(
|
||||||
devices.clone(),
|
devices.clone(),
|
||||||
glt.clone(),
|
glt.clone(),
|
||||||
guests.clone(),
|
zones.clone(),
|
||||||
events.clone(),
|
events.clone(),
|
||||||
runtime_for_reconciler,
|
runtime_for_reconciler,
|
||||||
packer.clone(),
|
packer.clone(),
|
||||||
guest_reconciler_notify.clone(),
|
zone_reconciler_notify.clone(),
|
||||||
kernel_path,
|
kernel_path,
|
||||||
initrd_path,
|
initrd_path,
|
||||||
addons_path,
|
addons_path,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
|
let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
|
||||||
let generator_task = generator.launch().await?;
|
let generator_task = generator.launch().await?;
|
||||||
|
|
||||||
// TODO: Create a way of abstracting early init tasks in kratad.
|
// TODO: Create a way of abstracting early init tasks in kratad.
|
||||||
@ -139,10 +139,10 @@ impl Daemon {
|
|||||||
_config: config,
|
_config: config,
|
||||||
glt,
|
glt,
|
||||||
devices,
|
devices,
|
||||||
guests,
|
zones,
|
||||||
events,
|
events,
|
||||||
guest_reconciler_task,
|
zone_reconciler_task,
|
||||||
guest_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
generator_task,
|
generator_task,
|
||||||
idm,
|
idm,
|
||||||
console,
|
console,
|
||||||
@ -158,8 +158,8 @@ impl Daemon {
|
|||||||
self.events.clone(),
|
self.events.clone(),
|
||||||
self.console.clone(),
|
self.console.clone(),
|
||||||
self.idm.clone(),
|
self.idm.clone(),
|
||||||
self.guests.clone(),
|
self.zones.clone(),
|
||||||
self.guest_reconciler_notify.clone(),
|
self.zone_reconciler_notify.clone(),
|
||||||
self.packer.clone(),
|
self.packer.clone(),
|
||||||
self.runtime.clone(),
|
self.runtime.clone(),
|
||||||
);
|
);
|
||||||
@ -214,20 +214,20 @@ impl Daemon {
|
|||||||
|
|
||||||
impl Drop for Daemon {
|
impl Drop for Daemon {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
self.guest_reconciler_task.abort();
|
self.zone_reconciler_task.abort();
|
||||||
self.generator_task.abort();
|
self.generator_task.abort();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn detect_guest_path(store: &str, name: &str) -> Result<PathBuf> {
|
fn detect_zone_path(store: &str, name: &str) -> Result<PathBuf> {
|
||||||
let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
|
let mut path = PathBuf::from(format!("{}/zone/{}", store, name));
|
||||||
if path.is_file() {
|
if path.is_file() {
|
||||||
return Ok(path);
|
return Ok(path);
|
||||||
}
|
}
|
||||||
|
|
||||||
path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
|
path = PathBuf::from(format!("/usr/share/krata/zone/{}", name));
|
||||||
if path.is_file() {
|
if path.is_file() {
|
||||||
return Ok(path);
|
return Ok(path);
|
||||||
}
|
}
|
||||||
Err(anyhow!("unable to find required guest file: {}", name))
|
Err(anyhow!("unable to find required zone file: {}", name))
|
||||||
}
|
}
|
||||||
|
@ -1,20 +1,20 @@
|
|||||||
use krata::{
|
use krata::{
|
||||||
idm::internal::{MetricFormat, MetricNode},
|
idm::internal::{MetricFormat, MetricNode},
|
||||||
v1::common::{GuestMetricFormat, GuestMetricNode},
|
v1::common::{ZoneMetricFormat, ZoneMetricNode},
|
||||||
};
|
};
|
||||||
|
|
||||||
fn idm_metric_format_to_api(format: MetricFormat) -> GuestMetricFormat {
|
fn idm_metric_format_to_api(format: MetricFormat) -> ZoneMetricFormat {
|
||||||
match format {
|
match format {
|
||||||
MetricFormat::Unknown => GuestMetricFormat::Unknown,
|
MetricFormat::Unknown => ZoneMetricFormat::Unknown,
|
||||||
MetricFormat::Bytes => GuestMetricFormat::Bytes,
|
MetricFormat::Bytes => ZoneMetricFormat::Bytes,
|
||||||
MetricFormat::Integer => GuestMetricFormat::Integer,
|
MetricFormat::Integer => ZoneMetricFormat::Integer,
|
||||||
MetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
|
MetricFormat::DurationSeconds => ZoneMetricFormat::DurationSeconds,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn idm_metric_to_api(node: MetricNode) -> GuestMetricNode {
|
pub fn idm_metric_to_api(node: MetricNode) -> ZoneMetricNode {
|
||||||
let format = node.format();
|
let format = node.format();
|
||||||
GuestMetricNode {
|
ZoneMetricNode {
|
||||||
name: node.name,
|
name: node.name,
|
||||||
value: node.value,
|
value: node.value,
|
||||||
format: idm_metric_format_to_api(format).into(),
|
format: idm_metric_format_to_api(format).into(),
|
||||||
|
@ -1 +1 @@
|
|||||||
pub mod guest;
|
pub mod zone;
|
||||||
|
@ -7,11 +7,11 @@ use std::{
|
|||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use krata::v1::{
|
use krata::v1::{
|
||||||
common::{Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState, GuestState, GuestStatus},
|
common::{Zone, ZoneErrorInfo, ZoneExitInfo, ZoneNetworkState, ZoneState, ZoneStatus},
|
||||||
control::GuestChangedEvent,
|
control::ZoneChangedEvent,
|
||||||
};
|
};
|
||||||
use krataoci::packer::service::OciPackerService;
|
use krataoci::packer::service::OciPackerService;
|
||||||
use kratart::{GuestInfo, Runtime};
|
use kratart::{Runtime, ZoneInfo};
|
||||||
use log::{error, info, trace, warn};
|
use log::{error, info, trace, warn};
|
||||||
use tokio::{
|
use tokio::{
|
||||||
select,
|
select,
|
||||||
@ -25,69 +25,69 @@ use tokio::{
|
|||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
db::GuestStore,
|
db::ZoneStore,
|
||||||
devices::DaemonDeviceManager,
|
devices::DaemonDeviceManager,
|
||||||
event::{DaemonEvent, DaemonEventContext},
|
event::{DaemonEvent, DaemonEventContext},
|
||||||
glt::GuestLookupTable,
|
zlt::ZoneLookupTable,
|
||||||
};
|
};
|
||||||
|
|
||||||
use self::start::GuestStarter;
|
use self::start::ZoneStarter;
|
||||||
|
|
||||||
mod start;
|
mod start;
|
||||||
|
|
||||||
const PARALLEL_LIMIT: u32 = 5;
|
const PARALLEL_LIMIT: u32 = 5;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
enum GuestReconcilerResult {
|
enum ZoneReconcilerResult {
|
||||||
Unchanged,
|
Unchanged,
|
||||||
Changed { rerun: bool },
|
Changed { rerun: bool },
|
||||||
}
|
}
|
||||||
|
|
||||||
struct GuestReconcilerEntry {
|
struct ZoneReconcilerEntry {
|
||||||
task: JoinHandle<()>,
|
task: JoinHandle<()>,
|
||||||
sender: Sender<()>,
|
sender: Sender<()>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for GuestReconcilerEntry {
|
impl Drop for ZoneReconcilerEntry {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
self.task.abort();
|
self.task.abort();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct GuestReconciler {
|
pub struct ZoneReconciler {
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
glt: GuestLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
packer: OciPackerService,
|
packer: OciPackerService,
|
||||||
kernel_path: PathBuf,
|
kernel_path: PathBuf,
|
||||||
initrd_path: PathBuf,
|
initrd_path: PathBuf,
|
||||||
addons_path: PathBuf,
|
addons_path: PathBuf,
|
||||||
tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
|
tasks: Arc<Mutex<HashMap<Uuid, ZoneReconcilerEntry>>>,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
reconcile_lock: Arc<RwLock<()>>,
|
zone_reconcile_lock: Arc<RwLock<()>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestReconciler {
|
impl ZoneReconciler {
|
||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn new(
|
pub fn new(
|
||||||
devices: DaemonDeviceManager,
|
devices: DaemonDeviceManager,
|
||||||
glt: GuestLookupTable,
|
zlt: ZoneLookupTable,
|
||||||
guests: GuestStore,
|
zones: ZoneStore,
|
||||||
events: DaemonEventContext,
|
events: DaemonEventContext,
|
||||||
runtime: Runtime,
|
runtime: Runtime,
|
||||||
packer: OciPackerService,
|
packer: OciPackerService,
|
||||||
guest_reconciler_notify: Sender<Uuid>,
|
zone_reconciler_notify: Sender<Uuid>,
|
||||||
kernel_path: PathBuf,
|
kernel_path: PathBuf,
|
||||||
initrd_path: PathBuf,
|
initrd_path: PathBuf,
|
||||||
modules_path: PathBuf,
|
modules_path: PathBuf,
|
||||||
) -> Result<Self> {
|
) -> Result<Self> {
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
devices,
|
devices,
|
||||||
glt,
|
zlt,
|
||||||
guests,
|
zones,
|
||||||
events,
|
events,
|
||||||
runtime,
|
runtime,
|
||||||
packer,
|
packer,
|
||||||
@ -95,8 +95,8 @@ impl GuestReconciler {
|
|||||||
initrd_path,
|
initrd_path,
|
||||||
addons_path: modules_path,
|
addons_path: modules_path,
|
||||||
tasks: Arc::new(Mutex::new(HashMap::new())),
|
tasks: Arc::new(Mutex::new(HashMap::new())),
|
||||||
guest_reconciler_notify,
|
zone_reconciler_notify,
|
||||||
reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,13 +115,13 @@ impl GuestReconciler {
|
|||||||
|
|
||||||
Some(uuid) => {
|
Some(uuid) => {
|
||||||
if let Err(error) = self.launch_task_if_needed(uuid).await {
|
if let Err(error) = self.launch_task_if_needed(uuid).await {
|
||||||
error!("failed to start guest reconciler task {}: {}", uuid, error);
|
error!("failed to start zone reconciler task {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
|
|
||||||
let map = self.tasks.lock().await;
|
let map = self.tasks.lock().await;
|
||||||
if let Some(entry) = map.get(&uuid) {
|
if let Some(entry) = map.get(&uuid) {
|
||||||
if let Err(error) = entry.sender.send(()).await {
|
if let Err(error) = entry.sender.send(()).await {
|
||||||
error!("failed to notify guest reconciler task {}: {}", uuid, error);
|
error!("failed to notify zone reconciler task {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -138,52 +138,52 @@ impl GuestReconciler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
|
pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
|
||||||
let _permit = self.reconcile_lock.write().await;
|
let _permit = self.zone_reconcile_lock.write().await;
|
||||||
trace!("reconciling runtime");
|
trace!("reconciling runtime");
|
||||||
let runtime_guests = self.runtime.list().await?;
|
let runtime_zones = self.runtime.list().await?;
|
||||||
let stored_guests = self.guests.list().await?;
|
let stored_zones = self.zones.list().await?;
|
||||||
|
|
||||||
let non_existent_guests = runtime_guests
|
let non_existent_zones = runtime_zones
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|x| !stored_guests.iter().any(|g| *g.0 == x.uuid))
|
.filter(|x| !stored_zones.iter().any(|g| *g.0 == x.uuid))
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
for guest in non_existent_guests {
|
for zone in non_existent_zones {
|
||||||
warn!("destroying unknown runtime guest {}", guest.uuid);
|
warn!("destroying unknown runtime zone {}", zone.uuid);
|
||||||
if let Err(error) = self.runtime.destroy(guest.uuid).await {
|
if let Err(error) = self.runtime.destroy(zone.uuid).await {
|
||||||
error!(
|
error!(
|
||||||
"failed to destroy unknown runtime guest {}: {}",
|
"failed to destroy unknown runtime zone {}: {}",
|
||||||
guest.uuid, error
|
zone.uuid, error
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
self.guests.remove(guest.uuid).await?;
|
self.zones.remove(zone.uuid).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut device_claims = HashMap::new();
|
let mut device_claims = HashMap::new();
|
||||||
|
|
||||||
for (uuid, mut stored_guest) in stored_guests {
|
for (uuid, mut stored_zone) in stored_zones {
|
||||||
let previous_guest = stored_guest.clone();
|
let previous_zone = stored_zone.clone();
|
||||||
let runtime_guest = runtime_guests.iter().find(|x| x.uuid == uuid);
|
let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
|
||||||
match runtime_guest {
|
match runtime_zone {
|
||||||
None => {
|
None => {
|
||||||
let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
|
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
||||||
if state.status() == GuestStatus::Started {
|
if state.status() == ZoneStatus::Started {
|
||||||
state.status = GuestStatus::Starting.into();
|
state.status = ZoneStatus::Starting.into();
|
||||||
}
|
}
|
||||||
stored_guest.state = Some(state);
|
stored_zone.state = Some(state);
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(runtime) => {
|
Some(runtime) => {
|
||||||
self.glt.associate(uuid, runtime.domid).await;
|
self.zlt.associate(uuid, runtime.domid).await;
|
||||||
let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
|
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
||||||
if let Some(code) = runtime.state.exit_code {
|
if let Some(code) = runtime.state.exit_code {
|
||||||
state.status = GuestStatus::Exited.into();
|
state.status = ZoneStatus::Exited.into();
|
||||||
state.exit_info = Some(GuestExitInfo { code });
|
state.exit_info = Some(ZoneExitInfo { code });
|
||||||
} else {
|
} else {
|
||||||
state.status = GuestStatus::Started.into();
|
state.status = ZoneStatus::Started.into();
|
||||||
}
|
}
|
||||||
|
|
||||||
for device in &stored_guest
|
for device in &stored_zone
|
||||||
.spec
|
.spec
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.cloned()
|
.cloned()
|
||||||
@ -193,16 +193,16 @@ impl GuestReconciler {
|
|||||||
device_claims.insert(device.name.clone(), uuid);
|
device_claims.insert(device.name.clone(), uuid);
|
||||||
}
|
}
|
||||||
|
|
||||||
state.network = Some(guestinfo_to_networkstate(runtime));
|
state.network = Some(zoneinfo_to_networkstate(runtime));
|
||||||
stored_guest.state = Some(state);
|
stored_zone.state = Some(state);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let changed = stored_guest != previous_guest;
|
let changed = stored_zone != previous_zone;
|
||||||
|
|
||||||
if changed || initial {
|
if changed || initial {
|
||||||
self.guests.update(uuid, stored_guest).await?;
|
self.zones.update(uuid, stored_zone).await?;
|
||||||
let _ = self.guest_reconciler_notify.try_send(uuid);
|
let _ = self.zone_reconciler_notify.try_send(uuid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -212,59 +212,59 @@ impl GuestReconciler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
|
pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
|
||||||
let _runtime_reconcile_permit = self.reconcile_lock.read().await;
|
let _runtime_reconcile_permit = self.zone_reconcile_lock.read().await;
|
||||||
let Some(mut guest) = self.guests.read(uuid).await? else {
|
let Some(mut zone) = self.zones.read(uuid).await? else {
|
||||||
warn!(
|
warn!(
|
||||||
"notified of reconcile for guest {} but it didn't exist",
|
"notified of reconcile for zone {} but it didn't exist",
|
||||||
uuid
|
uuid
|
||||||
);
|
);
|
||||||
return Ok(false);
|
return Ok(false);
|
||||||
};
|
};
|
||||||
|
|
||||||
info!("reconciling guest {}", uuid);
|
info!("reconciling zone {}", uuid);
|
||||||
|
|
||||||
self.events
|
self.events
|
||||||
.send(DaemonEvent::GuestChanged(GuestChangedEvent {
|
.send(DaemonEvent::ZoneChanged(ZoneChangedEvent {
|
||||||
guest: Some(guest.clone()),
|
zone: Some(zone.clone()),
|
||||||
}))?;
|
}))?;
|
||||||
|
|
||||||
let start_status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
let start_status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||||
let result = match start_status {
|
let result = match start_status {
|
||||||
GuestStatus::Starting => self.start(uuid, &mut guest).await,
|
ZoneStatus::Starting => self.start(uuid, &mut zone).await,
|
||||||
GuestStatus::Exited => self.exited(&mut guest).await,
|
ZoneStatus::Exited => self.exited(&mut zone).await,
|
||||||
GuestStatus::Destroying => self.destroy(uuid, &mut guest).await,
|
ZoneStatus::Destroying => self.destroy(uuid, &mut zone).await,
|
||||||
_ => Ok(GuestReconcilerResult::Unchanged),
|
_ => Ok(ZoneReconcilerResult::Unchanged),
|
||||||
};
|
};
|
||||||
|
|
||||||
let result = match result {
|
let result = match result {
|
||||||
Ok(result) => result,
|
Ok(result) => result,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
|
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
|
||||||
guest.state.as_mut().unwrap().status = GuestStatus::Failed.into();
|
zone.state.as_mut().unwrap().status = ZoneStatus::Failed.into();
|
||||||
guest.state.as_mut().unwrap().error_info = Some(GuestErrorInfo {
|
zone.state.as_mut().unwrap().error_info = Some(ZoneErrorInfo {
|
||||||
message: error.to_string(),
|
message: error.to_string(),
|
||||||
});
|
});
|
||||||
warn!("failed to start guest {}: {}", guest.id, error);
|
warn!("failed to start zone {}: {}", zone.id, error);
|
||||||
GuestReconcilerResult::Changed { rerun: false }
|
ZoneReconcilerResult::Changed { rerun: false }
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
info!("reconciled guest {}", uuid);
|
info!("reconciled zone {}", uuid);
|
||||||
|
|
||||||
let status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
let status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||||
let destroyed = status == GuestStatus::Destroyed;
|
let destroyed = status == ZoneStatus::Destroyed;
|
||||||
|
|
||||||
let rerun = if let GuestReconcilerResult::Changed { rerun } = result {
|
let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
|
||||||
let event = DaemonEvent::GuestChanged(GuestChangedEvent {
|
let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
|
||||||
guest: Some(guest.clone()),
|
zone: Some(zone.clone()),
|
||||||
});
|
});
|
||||||
|
|
||||||
if destroyed {
|
if destroyed {
|
||||||
self.guests.remove(uuid).await?;
|
self.zones.remove(uuid).await?;
|
||||||
let mut map = self.tasks.lock().await;
|
let mut map = self.tasks.lock().await;
|
||||||
map.remove(&uuid);
|
map.remove(&uuid);
|
||||||
} else {
|
} else {
|
||||||
self.guests.update(uuid, guest.clone()).await?;
|
self.zones.update(uuid, zone.clone()).await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.events.send(event)?;
|
self.events.send(event)?;
|
||||||
@ -276,50 +276,50 @@ impl GuestReconciler {
|
|||||||
Ok(rerun)
|
Ok(rerun)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
let starter = GuestStarter {
|
let starter = ZoneStarter {
|
||||||
devices: &self.devices,
|
devices: &self.devices,
|
||||||
kernel_path: &self.kernel_path,
|
kernel_path: &self.kernel_path,
|
||||||
initrd_path: &self.initrd_path,
|
initrd_path: &self.initrd_path,
|
||||||
addons_path: &self.addons_path,
|
addons_path: &self.addons_path,
|
||||||
packer: &self.packer,
|
packer: &self.packer,
|
||||||
glt: &self.glt,
|
glt: &self.zlt,
|
||||||
runtime: &self.runtime,
|
runtime: &self.runtime,
|
||||||
};
|
};
|
||||||
starter.start(uuid, guest).await
|
starter.start(uuid, zone).await
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
if let Some(ref mut state) = guest.state {
|
if let Some(ref mut state) = zone.state {
|
||||||
state.set_status(GuestStatus::Destroying);
|
state.set_status(ZoneStatus::Destroying);
|
||||||
Ok(GuestReconcilerResult::Changed { rerun: true })
|
Ok(ZoneReconcilerResult::Changed { rerun: true })
|
||||||
} else {
|
} else {
|
||||||
Ok(GuestReconcilerResult::Unchanged)
|
Ok(ZoneReconcilerResult::Unchanged)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn destroy(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
async fn destroy(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
if let Err(error) = self.runtime.destroy(uuid).await {
|
if let Err(error) = self.runtime.destroy(uuid).await {
|
||||||
trace!("failed to destroy runtime guest {}: {}", uuid, error);
|
trace!("failed to destroy runtime zone {}: {}", uuid, error);
|
||||||
}
|
}
|
||||||
|
|
||||||
let domid = guest.state.as_ref().map(|x| x.domid);
|
let domid = zone.state.as_ref().map(|x| x.domid);
|
||||||
|
|
||||||
if let Some(domid) = domid {
|
if let Some(domid) = domid {
|
||||||
self.glt.remove(uuid, domid).await;
|
self.zlt.remove(uuid, domid).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("destroyed guest {}", uuid);
|
info!("destroyed zone {}", uuid);
|
||||||
guest.state = Some(GuestState {
|
zone.state = Some(ZoneState {
|
||||||
status: GuestStatus::Destroyed.into(),
|
status: ZoneStatus::Destroyed.into(),
|
||||||
network: None,
|
network: None,
|
||||||
exit_info: None,
|
exit_info: None,
|
||||||
error_info: None,
|
error_info: None,
|
||||||
host: self.glt.host_uuid().to_string(),
|
host: self.zlt.host_uuid().to_string(),
|
||||||
domid: domid.unwrap_or(u32::MAX),
|
domid: domid.unwrap_or(u32::MAX),
|
||||||
});
|
});
|
||||||
self.devices.release_all(uuid).await?;
|
self.devices.release_all(uuid).await?;
|
||||||
Ok(GuestReconcilerResult::Changed { rerun: false })
|
Ok(ZoneReconcilerResult::Changed { rerun: false })
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
|
async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
|
||||||
@ -333,7 +333,7 @@ impl GuestReconciler {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn launch_task(&self, uuid: Uuid) -> Result<GuestReconcilerEntry> {
|
async fn launch_task(&self, uuid: Uuid) -> Result<ZoneReconcilerEntry> {
|
||||||
let this = self.clone();
|
let this = self.clone();
|
||||||
let (sender, mut receiver) = channel(10);
|
let (sender, mut receiver) = channel(10);
|
||||||
let task = tokio::task::spawn(async move {
|
let task = tokio::task::spawn(async move {
|
||||||
@ -346,7 +346,7 @@ impl GuestReconciler {
|
|||||||
let rerun = match this.reconcile(uuid).await {
|
let rerun = match this.reconcile(uuid).await {
|
||||||
Ok(rerun) => rerun,
|
Ok(rerun) => rerun,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
error!("failed to reconcile guest {}: {}", uuid, error);
|
error!("failed to reconcile zone {}: {}", uuid, error);
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -358,15 +358,15 @@ impl GuestReconciler {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
Ok(GuestReconcilerEntry { task, sender })
|
Ok(ZoneReconcilerEntry { task, sender })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
|
pub fn zoneinfo_to_networkstate(info: &ZoneInfo) -> ZoneNetworkState {
|
||||||
GuestNetworkState {
|
ZoneNetworkState {
|
||||||
guest_ipv4: info.guest_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
zone_ipv4: info.zone_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
guest_ipv6: info.guest_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
zone_ipv6: info.zone_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
guest_mac: info.guest_mac.as_ref().cloned().unwrap_or_default(),
|
zone_mac: info.zone_mac.as_ref().cloned().unwrap_or_default(),
|
||||||
gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
||||||
gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),
|
gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),
|
@ -6,40 +6,40 @@ use std::sync::atomic::{AtomicBool, Ordering};
|
|||||||
use anyhow::{anyhow, Result};
|
use anyhow::{anyhow, Result};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use krata::launchcfg::LaunchPackedFormat;
|
use krata::launchcfg::LaunchPackedFormat;
|
||||||
use krata::v1::common::GuestOciImageSpec;
|
use krata::v1::common::ZoneOciImageSpec;
|
||||||
use krata::v1::common::{guest_image_spec::Image, Guest, GuestState, GuestStatus, OciImageFormat};
|
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
|
||||||
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
||||||
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
|
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
|
||||||
use kratart::{launch::GuestLaunchRequest, Runtime};
|
use kratart::{launch::ZoneLaunchRequest, Runtime};
|
||||||
use log::info;
|
use log::info;
|
||||||
|
|
||||||
|
use crate::config::DaemonPciDeviceRdmReservePolicy;
|
||||||
|
use crate::devices::DaemonDeviceManager;
|
||||||
|
use crate::{
|
||||||
|
reconcile::zone::{zoneinfo_to_networkstate, ZoneReconcilerResult},
|
||||||
|
zlt::ZoneLookupTable,
|
||||||
|
};
|
||||||
|
use krata::v1::common::zone_image_spec::Image;
|
||||||
use tokio::fs::{self, File};
|
use tokio::fs::{self, File};
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio_tar::Archive;
|
use tokio_tar::Archive;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::config::DaemonPciDeviceRdmReservePolicy;
|
pub struct ZoneStarter<'a> {
|
||||||
use crate::devices::DaemonDeviceManager;
|
|
||||||
use crate::{
|
|
||||||
glt::GuestLookupTable,
|
|
||||||
reconcile::guest::{guestinfo_to_networkstate, GuestReconcilerResult},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub struct GuestStarter<'a> {
|
|
||||||
pub devices: &'a DaemonDeviceManager,
|
pub devices: &'a DaemonDeviceManager,
|
||||||
pub kernel_path: &'a Path,
|
pub kernel_path: &'a Path,
|
||||||
pub initrd_path: &'a Path,
|
pub initrd_path: &'a Path,
|
||||||
pub addons_path: &'a Path,
|
pub addons_path: &'a Path,
|
||||||
pub packer: &'a OciPackerService,
|
pub packer: &'a OciPackerService,
|
||||||
pub glt: &'a GuestLookupTable,
|
pub glt: &'a ZoneLookupTable,
|
||||||
pub runtime: &'a Runtime,
|
pub runtime: &'a Runtime,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestStarter<'_> {
|
impl ZoneStarter<'_> {
|
||||||
pub async fn oci_spec_tar_read_file(
|
pub async fn oci_spec_tar_read_file(
|
||||||
&self,
|
&self,
|
||||||
file: &Path,
|
file: &Path,
|
||||||
oci: &GuestOciImageSpec,
|
oci: &ZoneOciImageSpec,
|
||||||
) -> Result<Vec<u8>> {
|
) -> Result<Vec<u8>> {
|
||||||
if oci.format() != OciImageFormat::Tar {
|
if oci.format() != OciImageFormat::Tar {
|
||||||
return Err(anyhow!(
|
return Err(anyhow!(
|
||||||
@ -75,9 +75,9 @@ impl GuestStarter<'_> {
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
|
pub async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||||
let Some(ref spec) = guest.spec else {
|
let Some(ref spec) = zone.spec else {
|
||||||
return Err(anyhow!("guest spec not specified"));
|
return Err(anyhow!("zone spec not specified"));
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(ref image) = spec.image else {
|
let Some(ref image) = spec.image else {
|
||||||
@ -100,7 +100,7 @@ impl GuestStarter<'_> {
|
|||||||
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
|
OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
|
||||||
OciImageFormat::Erofs => OciPackedFormat::Erofs,
|
OciImageFormat::Erofs => OciPackedFormat::Erofs,
|
||||||
OciImageFormat::Tar => {
|
OciImageFormat::Tar => {
|
||||||
return Err(anyhow!("tar image format is not supported for guests"));
|
return Err(anyhow!("tar image format is not supported for zones"));
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
@ -176,7 +176,7 @@ impl GuestStarter<'_> {
|
|||||||
|
|
||||||
let info = self
|
let info = self
|
||||||
.runtime
|
.runtime
|
||||||
.launch(GuestLaunchRequest {
|
.launch(ZoneLaunchRequest {
|
||||||
format: LaunchPackedFormat::Squashfs,
|
format: LaunchPackedFormat::Squashfs,
|
||||||
uuid: Some(uuid),
|
uuid: Some(uuid),
|
||||||
name: if spec.name.is_empty() {
|
name: if spec.name.is_empty() {
|
||||||
@ -201,17 +201,17 @@ impl GuestStarter<'_> {
|
|||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
self.glt.associate(uuid, info.domid).await;
|
self.glt.associate(uuid, info.domid).await;
|
||||||
info!("started guest {}", uuid);
|
info!("started zone {}", uuid);
|
||||||
guest.state = Some(GuestState {
|
zone.state = Some(ZoneState {
|
||||||
status: GuestStatus::Started.into(),
|
status: ZoneStatus::Started.into(),
|
||||||
network: Some(guestinfo_to_networkstate(&info)),
|
network: Some(zoneinfo_to_networkstate(&info)),
|
||||||
exit_info: None,
|
exit_info: None,
|
||||||
error_info: None,
|
error_info: None,
|
||||||
host: self.glt.host_uuid().to_string(),
|
host: self.glt.host_uuid().to_string(),
|
||||||
domid: info.domid,
|
domid: info.domid,
|
||||||
});
|
});
|
||||||
success.store(true, Ordering::Release);
|
success.store(true, Ordering::Release);
|
||||||
Ok(GuestReconcilerResult::Changed { rerun: false })
|
Ok(ZoneReconcilerResult::Changed { rerun: false })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -3,18 +3,18 @@ use std::{collections::HashMap, sync::Arc};
|
|||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
struct GuestLookupTableState {
|
struct ZoneLookupTableState {
|
||||||
domid_to_uuid: HashMap<u32, Uuid>,
|
domid_to_uuid: HashMap<u32, Uuid>,
|
||||||
uuid_to_domid: HashMap<Uuid, u32>,
|
uuid_to_domid: HashMap<Uuid, u32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestLookupTableState {
|
impl ZoneLookupTableState {
|
||||||
pub fn new(host_uuid: Uuid) -> Self {
|
pub fn new(host_uuid: Uuid) -> Self {
|
||||||
let mut domid_to_uuid = HashMap::new();
|
let mut domid_to_uuid = HashMap::new();
|
||||||
let mut uuid_to_domid = HashMap::new();
|
let mut uuid_to_domid = HashMap::new();
|
||||||
domid_to_uuid.insert(0, host_uuid);
|
domid_to_uuid.insert(0, host_uuid);
|
||||||
uuid_to_domid.insert(host_uuid, 0);
|
uuid_to_domid.insert(host_uuid, 0);
|
||||||
GuestLookupTableState {
|
ZoneLookupTableState {
|
||||||
domid_to_uuid,
|
domid_to_uuid,
|
||||||
uuid_to_domid,
|
uuid_to_domid,
|
||||||
}
|
}
|
||||||
@ -22,18 +22,18 @@ impl GuestLookupTableState {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct GuestLookupTable {
|
pub struct ZoneLookupTable {
|
||||||
host_domid: u32,
|
host_domid: u32,
|
||||||
host_uuid: Uuid,
|
host_uuid: Uuid,
|
||||||
state: Arc<RwLock<GuestLookupTableState>>,
|
state: Arc<RwLock<ZoneLookupTableState>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestLookupTable {
|
impl ZoneLookupTable {
|
||||||
pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
|
pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
|
||||||
GuestLookupTable {
|
ZoneLookupTable {
|
||||||
host_domid,
|
host_domid,
|
||||||
host_uuid,
|
host_uuid,
|
||||||
state: Arc::new(RwLock::new(GuestLookupTableState::new(host_uuid))),
|
state: Arc::new(RwLock::new(ZoneLookupTableState::new(host_uuid))),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -1,30 +0,0 @@
|
|||||||
use anyhow::{anyhow, Result};
|
|
||||||
use env_logger::Env;
|
|
||||||
use krataguest::{death, init::GuestInit};
|
|
||||||
use log::error;
|
|
||||||
use std::env;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() -> Result<()> {
|
|
||||||
env::set_var("RUST_BACKTRACE", "1");
|
|
||||||
env_logger::Builder::from_env(Env::default().default_filter_or("warn")).init();
|
|
||||||
if env::var("KRATA_UNSAFE_ALWAYS_ALLOW_INIT").unwrap_or("0".to_string()) != "1" {
|
|
||||||
let pid = std::process::id();
|
|
||||||
if pid > 3 {
|
|
||||||
return Err(anyhow!(
|
|
||||||
"not running because the pid of {} indicates this is probably not \
|
|
||||||
the right context for the init daemon. \
|
|
||||||
run with KRATA_UNSAFE_ALWAYS_ALLOW_INIT=1 to bypass this check",
|
|
||||||
pid
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let mut guest = GuestInit::new();
|
|
||||||
if let Err(error) = guest.init().await {
|
|
||||||
error!("failed to initialize guest: {}", error);
|
|
||||||
death(127).await?;
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
death(1).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -8,29 +8,29 @@ option java_outer_classname = "CommonProto";
|
|||||||
|
|
||||||
import "google/protobuf/struct.proto";
|
import "google/protobuf/struct.proto";
|
||||||
|
|
||||||
message Guest {
|
message Zone {
|
||||||
string id = 1;
|
string id = 1;
|
||||||
GuestSpec spec = 2;
|
ZoneSpec spec = 2;
|
||||||
GuestState state = 3;
|
ZoneState state = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestSpec {
|
message ZoneSpec {
|
||||||
string name = 1;
|
string name = 1;
|
||||||
GuestImageSpec image = 2;
|
ZoneImageSpec image = 2;
|
||||||
// If not specified, defaults to the daemon default kernel.
|
// If not specified, defaults to the daemon default kernel.
|
||||||
GuestImageSpec kernel = 3;
|
ZoneImageSpec kernel = 3;
|
||||||
// If not specified, defaults to the daemon default initrd.
|
// If not specified, defaults to the daemon default initrd.
|
||||||
GuestImageSpec initrd = 4;
|
ZoneImageSpec initrd = 4;
|
||||||
uint32 vcpus = 5;
|
uint32 vcpus = 5;
|
||||||
uint64 mem = 6;
|
uint64 mem = 6;
|
||||||
GuestTaskSpec task = 7;
|
ZoneTaskSpec task = 7;
|
||||||
repeated GuestSpecAnnotation annotations = 8;
|
repeated ZoneSpecAnnotation annotations = 8;
|
||||||
repeated GuestSpecDevice devices = 9;
|
repeated ZoneSpecDevice devices = 9;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestImageSpec {
|
message ZoneImageSpec {
|
||||||
oneof image {
|
oneof image {
|
||||||
GuestOciImageSpec oci = 1;
|
ZoneOciImageSpec oci = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -42,77 +42,77 @@ enum OciImageFormat {
|
|||||||
OCI_IMAGE_FORMAT_TAR = 3;
|
OCI_IMAGE_FORMAT_TAR = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestOciImageSpec {
|
message ZoneOciImageSpec {
|
||||||
string digest = 1;
|
string digest = 1;
|
||||||
OciImageFormat format = 2;
|
OciImageFormat format = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestTaskSpec {
|
message ZoneTaskSpec {
|
||||||
repeated GuestTaskSpecEnvVar environment = 1;
|
repeated ZoneTaskSpecEnvVar environment = 1;
|
||||||
repeated string command = 2;
|
repeated string command = 2;
|
||||||
string working_directory = 3;
|
string working_directory = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestTaskSpecEnvVar {
|
message ZoneTaskSpecEnvVar {
|
||||||
string key = 1;
|
string key = 1;
|
||||||
string value = 2;
|
string value = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestSpecAnnotation {
|
message ZoneSpecAnnotation {
|
||||||
string key = 1;
|
string key = 1;
|
||||||
string value = 2;
|
string value = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestSpecDevice {
|
message ZoneSpecDevice {
|
||||||
string name = 1;
|
string name = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestState {
|
message ZoneState {
|
||||||
GuestStatus status = 1;
|
ZoneStatus status = 1;
|
||||||
GuestNetworkState network = 2;
|
ZoneNetworkState network = 2;
|
||||||
GuestExitInfo exit_info = 3;
|
ZoneExitInfo exit_info = 3;
|
||||||
GuestErrorInfo error_info = 4;
|
ZoneErrorInfo error_info = 4;
|
||||||
string host = 5;
|
string host = 5;
|
||||||
uint32 domid = 6;
|
uint32 domid = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum GuestStatus {
|
enum ZoneStatus {
|
||||||
GUEST_STATUS_UNKNOWN = 0;
|
ZONE_STATUS_UNKNOWN = 0;
|
||||||
GUEST_STATUS_STARTING = 1;
|
ZONE_STATUS_STARTING = 1;
|
||||||
GUEST_STATUS_STARTED = 2;
|
ZONE_STATUS_STARTED = 2;
|
||||||
GUEST_STATUS_EXITED = 3;
|
ZONE_STATUS_EXITED = 3;
|
||||||
GUEST_STATUS_DESTROYING = 4;
|
ZONE_STATUS_DESTROYING = 4;
|
||||||
GUEST_STATUS_DESTROYED = 5;
|
ZONE_STATUS_DESTROYED = 5;
|
||||||
GUEST_STATUS_FAILED = 6;
|
ZONE_STATUS_FAILED = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestNetworkState {
|
message ZoneNetworkState {
|
||||||
string guest_ipv4 = 1;
|
string zone_ipv4 = 1;
|
||||||
string guest_ipv6 = 2;
|
string zone_ipv6 = 2;
|
||||||
string guest_mac = 3;
|
string zone_mac = 3;
|
||||||
string gateway_ipv4 = 4;
|
string gateway_ipv4 = 4;
|
||||||
string gateway_ipv6 = 5;
|
string gateway_ipv6 = 5;
|
||||||
string gateway_mac = 6;
|
string gateway_mac = 6;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestExitInfo {
|
message ZoneExitInfo {
|
||||||
int32 code = 1;
|
int32 code = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestErrorInfo {
|
message ZoneErrorInfo {
|
||||||
string message = 1;
|
string message = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestMetricNode {
|
message ZoneMetricNode {
|
||||||
string name = 1;
|
string name = 1;
|
||||||
google.protobuf.Value value = 2;
|
google.protobuf.Value value = 2;
|
||||||
GuestMetricFormat format = 3;
|
ZoneMetricFormat format = 3;
|
||||||
repeated GuestMetricNode children = 4;
|
repeated ZoneMetricNode children = 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum GuestMetricFormat {
|
enum ZoneMetricFormat {
|
||||||
GUEST_METRIC_FORMAT_UNKNOWN = 0;
|
ZONE_METRIC_FORMAT_UNKNOWN = 0;
|
||||||
GUEST_METRIC_FORMAT_BYTES = 1;
|
ZONE_METRIC_FORMAT_BYTES = 1;
|
||||||
GUEST_METRIC_FORMAT_INTEGER = 2;
|
ZONE_METRIC_FORMAT_INTEGER = 2;
|
||||||
GUEST_METRIC_FORMAT_DURATION_SECONDS = 3;
|
ZONE_METRIC_FORMAT_DURATION_SECONDS = 3;
|
||||||
}
|
}
|
||||||
|
@ -12,17 +12,17 @@ import "krata/v1/common.proto";
|
|||||||
service ControlService {
|
service ControlService {
|
||||||
rpc IdentifyHost(IdentifyHostRequest) returns (IdentifyHostReply);
|
rpc IdentifyHost(IdentifyHostRequest) returns (IdentifyHostReply);
|
||||||
|
|
||||||
rpc CreateGuest(CreateGuestRequest) returns (CreateGuestReply);
|
rpc CreateZone(CreateZoneRequest) returns (CreateZoneReply);
|
||||||
rpc DestroyGuest(DestroyGuestRequest) returns (DestroyGuestReply);
|
rpc DestroyZone(DestroyZoneRequest) returns (DestroyZoneReply);
|
||||||
rpc ResolveGuest(ResolveGuestRequest) returns (ResolveGuestReply);
|
rpc ResolveZone(ResolveZoneRequest) returns (ResolveZoneReply);
|
||||||
rpc ListGuests(ListGuestsRequest) returns (ListGuestsReply);
|
rpc ListZones(ListZonesRequest) returns (ListZonesReply);
|
||||||
rpc ListDevices(ListDevicesRequest) returns (ListDevicesReply);
|
rpc ListDevices(ListDevicesRequest) returns (ListDevicesReply);
|
||||||
|
|
||||||
rpc ExecGuest(stream ExecGuestRequest) returns (stream ExecGuestReply);
|
rpc ExecZone(stream ExecZoneRequest) returns (stream ExecZoneReply);
|
||||||
|
|
||||||
|
rpc AttachZoneConsole(stream ZoneConsoleRequest) returns (stream ZoneConsoleReply);
|
||||||
|
rpc ReadZoneMetrics(ReadZoneMetricsRequest) returns (ReadZoneMetricsReply);
|
||||||
|
|
||||||
rpc ConsoleData(stream ConsoleDataRequest) returns (stream ConsoleDataReply);
|
|
||||||
rpc ReadGuestMetrics(ReadGuestMetricsRequest) returns (ReadGuestMetricsReply);
|
|
||||||
|
|
||||||
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
|
rpc SnoopIdm(SnoopIdmRequest) returns (stream SnoopIdmReply);
|
||||||
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
|
rpc WatchEvents(WatchEventsRequest) returns (stream WatchEventsReply);
|
||||||
|
|
||||||
@ -40,41 +40,41 @@ message IdentifyHostReply {
|
|||||||
string krata_version = 3;
|
string krata_version = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message CreateGuestRequest {
|
message CreateZoneRequest {
|
||||||
krata.v1.common.GuestSpec spec = 1;
|
krata.v1.common.ZoneSpec spec = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message CreateGuestReply {
|
message CreateZoneReply {
|
||||||
string guest_id = 1;
|
string zone_id = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message DestroyGuestRequest {
|
message DestroyZoneRequest {
|
||||||
string guest_id = 1;
|
string zone_id = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message DestroyGuestReply {}
|
message DestroyZoneReply {}
|
||||||
|
|
||||||
message ResolveGuestRequest {
|
message ResolveZoneRequest {
|
||||||
string name = 1;
|
string name = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ResolveGuestReply {
|
message ResolveZoneReply {
|
||||||
krata.v1.common.Guest guest = 1;
|
krata.v1.common.Zone zone = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ListGuestsRequest {}
|
message ListZonesRequest {}
|
||||||
|
|
||||||
message ListGuestsReply {
|
message ListZonesReply {
|
||||||
repeated krata.v1.common.Guest guests = 1;
|
repeated krata.v1.common.Zone zones = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ExecGuestRequest {
|
message ExecZoneRequest {
|
||||||
string guest_id = 1;
|
string zone_id = 1;
|
||||||
krata.v1.common.GuestTaskSpec task = 2;
|
krata.v1.common.ZoneTaskSpec task = 2;
|
||||||
bytes data = 3;
|
bytes data = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ExecGuestReply {
|
message ExecZoneReply {
|
||||||
bool exited = 1;
|
bool exited = 1;
|
||||||
string error = 2;
|
string error = 2;
|
||||||
int32 exit_code = 3;
|
int32 exit_code = 3;
|
||||||
@ -82,12 +82,12 @@ message ExecGuestReply {
|
|||||||
bytes stderr = 5;
|
bytes stderr = 5;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ConsoleDataRequest {
|
message ZoneConsoleRequest {
|
||||||
string guest_id = 1;
|
string zone_id = 1;
|
||||||
bytes data = 2;
|
bytes data = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ConsoleDataReply {
|
message ZoneConsoleReply {
|
||||||
bytes data = 1;
|
bytes data = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -95,20 +95,20 @@ message WatchEventsRequest {}
|
|||||||
|
|
||||||
message WatchEventsReply {
|
message WatchEventsReply {
|
||||||
oneof event {
|
oneof event {
|
||||||
GuestChangedEvent guest_changed = 1;
|
ZoneChangedEvent zone_changed = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
message GuestChangedEvent {
|
message ZoneChangedEvent {
|
||||||
krata.v1.common.Guest guest = 1;
|
krata.v1.common.Zone zone = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ReadGuestMetricsRequest {
|
message ReadZoneMetricsRequest {
|
||||||
string guest_id = 1;
|
string zone_id = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message ReadGuestMetricsReply {
|
message ReadZoneMetricsReply {
|
||||||
krata.v1.common.GuestMetricNode root = 1;
|
krata.v1.common.ZoneMetricNode root = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message SnoopIdmRequest {}
|
message SnoopIdmRequest {}
|
||||||
|
@ -2,10 +2,10 @@ use anyhow::Result;
|
|||||||
use krata::{
|
use krata::{
|
||||||
events::EventStream,
|
events::EventStream,
|
||||||
v1::{
|
v1::{
|
||||||
common::Guest,
|
common::Zone,
|
||||||
control::{
|
control::{
|
||||||
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
control_service_client::ControlServiceClient, watch_events_reply::Event,
|
||||||
ListGuestsRequest,
|
ListZonesRequest,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
@ -33,7 +33,7 @@ pub struct NetworkSide {
|
|||||||
pub struct NetworkMetadata {
|
pub struct NetworkMetadata {
|
||||||
pub domid: u32,
|
pub domid: u32,
|
||||||
pub uuid: Uuid,
|
pub uuid: Uuid,
|
||||||
pub guest: NetworkSide,
|
pub zone: NetworkSide,
|
||||||
pub gateway: NetworkSide,
|
pub gateway: NetworkSide,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -60,23 +60,23 @@ impl AutoNetworkWatcher {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub async fn read(&mut self) -> Result<Vec<NetworkMetadata>> {
|
pub async fn read(&mut self) -> Result<Vec<NetworkMetadata>> {
|
||||||
let mut all_guests: HashMap<Uuid, Guest> = HashMap::new();
|
let mut all_zones: HashMap<Uuid, Zone> = HashMap::new();
|
||||||
for guest in self
|
for zone in self
|
||||||
.control
|
.control
|
||||||
.list_guests(ListGuestsRequest {})
|
.list_zones(ListZonesRequest {})
|
||||||
.await?
|
.await?
|
||||||
.into_inner()
|
.into_inner()
|
||||||
.guests
|
.zones
|
||||||
{
|
{
|
||||||
let Ok(uuid) = Uuid::from_str(&guest.id) else {
|
let Ok(uuid) = Uuid::from_str(&zone.id) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
all_guests.insert(uuid, guest);
|
all_zones.insert(uuid, zone);
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut networks: Vec<NetworkMetadata> = Vec::new();
|
let mut networks: Vec<NetworkMetadata> = Vec::new();
|
||||||
for (uuid, guest) in &all_guests {
|
for (uuid, zone) in &all_zones {
|
||||||
let Some(ref state) = guest.state else {
|
let Some(ref state) = zone.state else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -88,15 +88,15 @@ impl AutoNetworkWatcher {
|
|||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(guest_ipv4_cidr) = Ipv4Cidr::from_str(&network.guest_ipv4) else {
|
let Ok(zone_ipv4_cidr) = Ipv4Cidr::from_str(&network.zone_ipv4) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(guest_ipv6_cidr) = Ipv6Cidr::from_str(&network.guest_ipv6) else {
|
let Ok(zone_ipv6_cidr) = Ipv6Cidr::from_str(&network.zone_ipv6) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let Ok(guest_mac) = EthernetAddress::from_str(&network.guest_mac) else {
|
let Ok(zone_mac) = EthernetAddress::from_str(&network.zone_mac) else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -115,10 +115,10 @@ impl AutoNetworkWatcher {
|
|||||||
networks.push(NetworkMetadata {
|
networks.push(NetworkMetadata {
|
||||||
domid: state.domid,
|
domid: state.domid,
|
||||||
uuid: *uuid,
|
uuid: *uuid,
|
||||||
guest: NetworkSide {
|
zone: NetworkSide {
|
||||||
ipv4: guest_ipv4_cidr,
|
ipv4: zone_ipv4_cidr,
|
||||||
ipv6: guest_ipv6_cidr,
|
ipv6: zone_ipv6_cidr,
|
||||||
mac: guest_mac,
|
mac: zone_mac,
|
||||||
},
|
},
|
||||||
gateway: NetworkSide {
|
gateway: NetworkSide {
|
||||||
ipv4: gateway_ipv4_cidr,
|
ipv4: gateway_ipv4_cidr,
|
||||||
@ -175,7 +175,7 @@ impl AutoNetworkWatcher {
|
|||||||
loop {
|
loop {
|
||||||
select! {
|
select! {
|
||||||
x = receiver.recv() => match x {
|
x = receiver.recv() => match x {
|
||||||
Ok(Event::GuestChanged(_)) => {
|
Ok(Event::ZoneChanged(_)) => {
|
||||||
break;
|
break;
|
||||||
},
|
},
|
||||||
|
|
||||||
|
@ -54,11 +54,11 @@ impl NetworkStack<'_> {
|
|||||||
match what {
|
match what {
|
||||||
NetworkStackSelect::Receive(Some(packet)) => {
|
NetworkStackSelect::Receive(Some(packet)) => {
|
||||||
if let Err(error) = self.bridge.to_bridge_sender.try_send(packet.clone()) {
|
if let Err(error) = self.bridge.to_bridge_sender.try_send(packet.clone()) {
|
||||||
trace!("failed to send guest packet to bridge: {}", error);
|
trace!("failed to send zone packet to bridge: {}", error);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Err(error) = self.nat.receive_sender.try_send(packet.clone()) {
|
if let Err(error) = self.nat.receive_sender.try_send(packet.clone()) {
|
||||||
trace!("failed to send guest packet to nat: {}", error);
|
trace!("failed to send zone packet to nat: {}", error);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.udev.rx = Some(packet);
|
self.udev.rx = Some(packet);
|
||||||
@ -137,7 +137,7 @@ impl NetworkBackend {
|
|||||||
.expect("failed to set ip addresses");
|
.expect("failed to set ip addresses");
|
||||||
});
|
});
|
||||||
let sockets = SocketSet::new(vec![]);
|
let sockets = SocketSet::new(vec![]);
|
||||||
let handle = self.bridge.join(self.metadata.guest.mac).await?;
|
let handle = self.bridge.join(self.metadata.zone.mac).await?;
|
||||||
let kdev = AsyncRawSocketChannel::new(mtu, kdev)?;
|
let kdev = AsyncRawSocketChannel::new(mtu, kdev)?;
|
||||||
Ok(NetworkStack {
|
Ok(NetworkStack {
|
||||||
tx: tx_receiver,
|
tx: tx_receiver,
|
||||||
@ -153,12 +153,12 @@ impl NetworkBackend {
|
|||||||
pub async fn launch(self) -> Result<JoinHandle<()>> {
|
pub async fn launch(self) -> Result<JoinHandle<()>> {
|
||||||
Ok(tokio::task::spawn(async move {
|
Ok(tokio::task::spawn(async move {
|
||||||
info!(
|
info!(
|
||||||
"launched network backend for krata guest {}",
|
"launched network backend for krata zone {}",
|
||||||
self.metadata.uuid
|
self.metadata.uuid
|
||||||
);
|
);
|
||||||
if let Err(error) = self.run().await {
|
if let Err(error) = self.run().await {
|
||||||
warn!(
|
warn!(
|
||||||
"network backend for krata guest {} failed: {}",
|
"network backend for krata zone {} failed: {}",
|
||||||
self.metadata.uuid, error
|
self.metadata.uuid, error
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@ -169,7 +169,7 @@ impl NetworkBackend {
|
|||||||
impl Drop for NetworkBackend {
|
impl Drop for NetworkBackend {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
info!(
|
info!(
|
||||||
"destroyed network backend for krata guest {}",
|
"destroyed network backend for krata zone {}",
|
||||||
self.metadata.uuid
|
self.metadata.uuid
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
@ -7,7 +7,7 @@ use hbridge::HostBridge;
|
|||||||
use krata::{
|
use krata::{
|
||||||
client::ControlClientProvider,
|
client::ControlClientProvider,
|
||||||
dial::ControlDialAddress,
|
dial::ControlDialAddress,
|
||||||
v1::{common::Guest, control::control_service_client::ControlServiceClient},
|
v1::{common::Zone, control::control_service_client::ControlServiceClient},
|
||||||
};
|
};
|
||||||
use log::warn;
|
use log::warn;
|
||||||
use tokio::{task::JoinHandle, time::sleep};
|
use tokio::{task::JoinHandle, time::sleep};
|
||||||
@ -33,7 +33,7 @@ pub const EXTRA_MTU: usize = 20;
|
|||||||
|
|
||||||
pub struct NetworkService {
|
pub struct NetworkService {
|
||||||
pub control: ControlServiceClient<Channel>,
|
pub control: ControlServiceClient<Channel>,
|
||||||
pub guests: HashMap<Uuid, Guest>,
|
pub zones: HashMap<Uuid, Zone>,
|
||||||
pub backends: HashMap<Uuid, JoinHandle<()>>,
|
pub backends: HashMap<Uuid, JoinHandle<()>>,
|
||||||
pub bridge: VirtualBridge,
|
pub bridge: VirtualBridge,
|
||||||
pub hbridge: HostBridge,
|
pub hbridge: HostBridge,
|
||||||
@ -47,7 +47,7 @@ impl NetworkService {
|
|||||||
HostBridge::new(HOST_BRIDGE_MTU + EXTRA_MTU, "krata0".to_string(), &bridge).await?;
|
HostBridge::new(HOST_BRIDGE_MTU + EXTRA_MTU, "krata0".to_string(), &bridge).await?;
|
||||||
Ok(NetworkService {
|
Ok(NetworkService {
|
||||||
control,
|
control,
|
||||||
guests: HashMap::new(),
|
zones: HashMap::new(),
|
||||||
backends: HashMap::new(),
|
backends: HashMap::new(),
|
||||||
bridge,
|
bridge,
|
||||||
hbridge,
|
hbridge,
|
||||||
@ -99,7 +99,7 @@ impl NetworkService {
|
|||||||
|
|
||||||
Err((metadata, error)) => {
|
Err((metadata, error)) => {
|
||||||
warn!(
|
warn!(
|
||||||
"failed to launch network backend for krata guest {}: {}",
|
"failed to launch network backend for krata zone {}: {}",
|
||||||
metadata.uuid, error
|
metadata.uuid, error
|
||||||
);
|
);
|
||||||
failed.push(metadata.uuid);
|
failed.push(metadata.uuid);
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "krata-runtime"
|
name = "krata-runtime"
|
||||||
description = "Runtime for running guests on the krata isolation engine"
|
description = "Runtime for managing zones on the krata isolation engine"
|
||||||
license.workspace = true
|
license.workspace = true
|
||||||
version.workspace = true
|
version.workspace = true
|
||||||
homepage.workspace = true
|
homepage.workspace = true
|
||||||
|
@ -99,23 +99,23 @@ impl IpVendor {
|
|||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
let assigned_ipv4 = store
|
let assigned_ipv4 = store
|
||||||
.read_string(format!("{}/krata/network/guest/ipv4", dom_path))
|
.read_string(format!("{}/krata/network/zone/ipv4", dom_path))
|
||||||
.await?
|
.await?
|
||||||
.and_then(|x| Ipv4Network::from_str(&x).ok());
|
.and_then(|x| Ipv4Network::from_str(&x).ok());
|
||||||
let assigned_ipv6 = store
|
let assigned_ipv6 = store
|
||||||
.read_string(format!("{}/krata/network/guest/ipv6", dom_path))
|
.read_string(format!("{}/krata/network/zone/ipv6", dom_path))
|
||||||
.await?
|
.await?
|
||||||
.and_then(|x| Ipv6Network::from_str(&x).ok());
|
.and_then(|x| Ipv6Network::from_str(&x).ok());
|
||||||
|
|
||||||
if let Some(existing_ipv4) = assigned_ipv4 {
|
if let Some(existing_ipv4) = assigned_ipv4 {
|
||||||
if let Some(previous) = state.ipv4.insert(existing_ipv4.ip(), uuid) {
|
if let Some(previous) = state.ipv4.insert(existing_ipv4.ip(), uuid) {
|
||||||
error!("ipv4 conflict detected: guest {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv4.ip(), uuid, uuid);
|
error!("ipv4 conflict detected: zone {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv4.ip(), uuid, uuid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(existing_ipv6) = assigned_ipv6 {
|
if let Some(existing_ipv6) = assigned_ipv6 {
|
||||||
if let Some(previous) = state.ipv6.insert(existing_ipv6.ip(), uuid) {
|
if let Some(previous) = state.ipv6.insert(existing_ipv6.ip(), uuid) {
|
||||||
error!("ipv6 conflict detected: guest {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv6.ip(), uuid, uuid);
|
error!("ipv6 conflict detected: zone {} owned {} but {} also claimed to own it, giving it to {}", previous, existing_ipv6.ip(), uuid, uuid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -251,13 +251,13 @@ impl IpVendor {
|
|||||||
intermediate.ipv6.insert(self.gateway_ipv6, self.host_uuid);
|
intermediate.ipv6.insert(self.gateway_ipv6, self.host_uuid);
|
||||||
for (ipv4, uuid) in &state.pending_ipv4 {
|
for (ipv4, uuid) in &state.pending_ipv4 {
|
||||||
if let Some(previous) = intermediate.ipv4.insert(*ipv4, *uuid) {
|
if let Some(previous) = intermediate.ipv4.insert(*ipv4, *uuid) {
|
||||||
error!("ipv4 conflict detected: guest {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv4, uuid, uuid);
|
error!("ipv4 conflict detected: zone {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv4, uuid, uuid);
|
||||||
}
|
}
|
||||||
intermediate.pending_ipv4.insert(*ipv4, *uuid);
|
intermediate.pending_ipv4.insert(*ipv4, *uuid);
|
||||||
}
|
}
|
||||||
for (ipv6, uuid) in &state.pending_ipv6 {
|
for (ipv6, uuid) in &state.pending_ipv6 {
|
||||||
if let Some(previous) = intermediate.ipv6.insert(*ipv6, *uuid) {
|
if let Some(previous) = intermediate.ipv6.insert(*ipv6, *uuid) {
|
||||||
error!("ipv6 conflict detected: guest {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv6, uuid, uuid);
|
error!("ipv6 conflict detected: zone {} owned (pending) {} but {} also claimed to own it, giving it to {}", previous, ipv6, uuid, uuid);
|
||||||
}
|
}
|
||||||
intermediate.pending_ipv6.insert(*ipv6, *uuid);
|
intermediate.pending_ipv6.insert(*ipv6, *uuid);
|
||||||
}
|
}
|
||||||
@ -271,16 +271,16 @@ impl IpVendor {
|
|||||||
domid: u32,
|
domid: u32,
|
||||||
) -> Result<Option<IpAssignment>> {
|
) -> Result<Option<IpAssignment>> {
|
||||||
let dom_path = format!("/local/domain/{}", domid);
|
let dom_path = format!("/local/domain/{}", domid);
|
||||||
let Some(guest_ipv4) = self
|
let Some(zone_ipv4) = self
|
||||||
.store
|
.store
|
||||||
.read_string(format!("{}/krata/network/guest/ipv4", dom_path))
|
.read_string(format!("{}/krata/network/zone/ipv4", dom_path))
|
||||||
.await?
|
.await?
|
||||||
else {
|
else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
};
|
};
|
||||||
let Some(guest_ipv6) = self
|
let Some(zone_ipv6) = self
|
||||||
.store
|
.store
|
||||||
.read_string(format!("{}/krata/network/guest/ipv6", dom_path))
|
.read_string(format!("{}/krata/network/zone/ipv6", dom_path))
|
||||||
.await?
|
.await?
|
||||||
else {
|
else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
@ -300,10 +300,10 @@ impl IpVendor {
|
|||||||
return Ok(None);
|
return Ok(None);
|
||||||
};
|
};
|
||||||
|
|
||||||
let Some(guest_ipv4) = Ipv4Network::from_str(&guest_ipv4).ok() else {
|
let Some(zone_ipv4) = Ipv4Network::from_str(&zone_ipv4).ok() else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
};
|
};
|
||||||
let Some(guest_ipv6) = Ipv6Network::from_str(&guest_ipv6).ok() else {
|
let Some(zone_ipv6) = Ipv6Network::from_str(&zone_ipv6).ok() else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
};
|
};
|
||||||
let Some(gateway_ipv4) = Ipv4Network::from_str(&gateway_ipv4).ok() else {
|
let Some(gateway_ipv4) = Ipv4Network::from_str(&gateway_ipv4).ok() else {
|
||||||
@ -315,10 +315,10 @@ impl IpVendor {
|
|||||||
Ok(Some(IpAssignment {
|
Ok(Some(IpAssignment {
|
||||||
vendor: self.clone(),
|
vendor: self.clone(),
|
||||||
uuid,
|
uuid,
|
||||||
ipv4: guest_ipv4.ip(),
|
ipv4: zone_ipv4.ip(),
|
||||||
ipv4_prefix: guest_ipv4.prefix(),
|
ipv4_prefix: zone_ipv4.prefix(),
|
||||||
ipv6: guest_ipv6.ip(),
|
ipv6: zone_ipv6.ip(),
|
||||||
ipv6_prefix: guest_ipv6.prefix(),
|
ipv6_prefix: zone_ipv6.prefix(),
|
||||||
gateway_ipv4: gateway_ipv4.ip(),
|
gateway_ipv4: gateway_ipv4.ip(),
|
||||||
gateway_ipv6: gateway_ipv6.ip(),
|
gateway_ipv6: gateway_ipv6.ip(),
|
||||||
committed: true,
|
committed: true,
|
||||||
|
@ -20,13 +20,13 @@ use xenplatform::domain::BaseDomainConfig;
|
|||||||
use crate::cfgblk::ConfigBlock;
|
use crate::cfgblk::ConfigBlock;
|
||||||
use crate::RuntimeContext;
|
use crate::RuntimeContext;
|
||||||
|
|
||||||
use super::{GuestInfo, GuestState};
|
use super::{ZoneInfo, ZoneState};
|
||||||
|
|
||||||
pub use xenclient::{
|
pub use xenclient::{
|
||||||
pci::PciBdf, DomainPciDevice as PciDevice, DomainPciRdmReservePolicy as PciRdmReservePolicy,
|
pci::PciBdf, DomainPciDevice as PciDevice, DomainPciRdmReservePolicy as PciRdmReservePolicy,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub struct GuestLaunchRequest {
|
pub struct ZoneLaunchRequest {
|
||||||
pub format: LaunchPackedFormat,
|
pub format: LaunchPackedFormat,
|
||||||
pub kernel: Vec<u8>,
|
pub kernel: Vec<u8>,
|
||||||
pub initrd: Vec<u8>,
|
pub initrd: Vec<u8>,
|
||||||
@ -42,11 +42,11 @@ pub struct GuestLaunchRequest {
|
|||||||
pub addons_image: Option<PathBuf>,
|
pub addons_image: Option<PathBuf>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct GuestLauncher {
|
pub struct ZoneLauncher {
|
||||||
pub launch_semaphore: Arc<Semaphore>,
|
pub launch_semaphore: Arc<Semaphore>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestLauncher {
|
impl ZoneLauncher {
|
||||||
pub fn new(launch_semaphore: Arc<Semaphore>) -> Result<Self> {
|
pub fn new(launch_semaphore: Arc<Semaphore>) -> Result<Self> {
|
||||||
Ok(Self { launch_semaphore })
|
Ok(Self { launch_semaphore })
|
||||||
}
|
}
|
||||||
@ -54,16 +54,16 @@ impl GuestLauncher {
|
|||||||
pub async fn launch(
|
pub async fn launch(
|
||||||
&mut self,
|
&mut self,
|
||||||
context: &RuntimeContext,
|
context: &RuntimeContext,
|
||||||
request: GuestLaunchRequest,
|
request: ZoneLaunchRequest,
|
||||||
) -> Result<GuestInfo> {
|
) -> Result<ZoneInfo> {
|
||||||
let uuid = request.uuid.unwrap_or_else(Uuid::new_v4);
|
let uuid = request.uuid.unwrap_or_else(Uuid::new_v4);
|
||||||
let xen_name = format!("krata-{uuid}");
|
let xen_name = format!("krata-{uuid}");
|
||||||
let mut gateway_mac = MacAddr6::random();
|
let mut gateway_mac = MacAddr6::random();
|
||||||
gateway_mac.set_local(true);
|
gateway_mac.set_local(true);
|
||||||
gateway_mac.set_multicast(false);
|
gateway_mac.set_multicast(false);
|
||||||
let mut container_mac = MacAddr6::random();
|
let mut zone_mac = MacAddr6::random();
|
||||||
container_mac.set_local(true);
|
zone_mac.set_local(true);
|
||||||
container_mac.set_multicast(false);
|
zone_mac.set_multicast(false);
|
||||||
|
|
||||||
let _launch_permit = self.launch_semaphore.acquire().await?;
|
let _launch_permit = self.launch_semaphore.acquire().await?;
|
||||||
let mut ip = context.ipvendor.assign(uuid).await?;
|
let mut ip = context.ipvendor.assign(uuid).await?;
|
||||||
@ -145,7 +145,7 @@ impl GuestLauncher {
|
|||||||
}
|
}
|
||||||
let cmdline = cmdline_options.join(" ");
|
let cmdline = cmdline_options.join(" ");
|
||||||
|
|
||||||
let guest_mac_string = container_mac.to_string().replace('-', ":");
|
let zone_mac_string = zone_mac.to_string().replace('-', ":");
|
||||||
let gateway_mac_string = gateway_mac.to_string().replace('-', ":");
|
let gateway_mac_string = gateway_mac.to_string().replace('-', ":");
|
||||||
|
|
||||||
let mut disks = vec![
|
let mut disks = vec![
|
||||||
@ -191,16 +191,16 @@ impl GuestLauncher {
|
|||||||
("krata/uuid".to_string(), uuid.to_string()),
|
("krata/uuid".to_string(), uuid.to_string()),
|
||||||
("krata/loops".to_string(), loops.join(",")),
|
("krata/loops".to_string(), loops.join(",")),
|
||||||
(
|
(
|
||||||
"krata/network/guest/ipv4".to_string(),
|
"krata/network/zone/ipv4".to_string(),
|
||||||
format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
|
format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"krata/network/guest/ipv6".to_string(),
|
"krata/network/zone/ipv6".to_string(),
|
||||||
format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
|
format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"krata/network/guest/mac".to_string(),
|
"krata/network/zone/mac".to_string(),
|
||||||
guest_mac_string.clone(),
|
zone_mac_string.clone(),
|
||||||
),
|
),
|
||||||
(
|
(
|
||||||
"krata/network/gateway/ipv4".to_string(),
|
"krata/network/gateway/ipv4".to_string(),
|
||||||
@ -240,7 +240,7 @@ impl GuestLauncher {
|
|||||||
initialized: false,
|
initialized: false,
|
||||||
}],
|
}],
|
||||||
vifs: vec![DomainNetworkInterface {
|
vifs: vec![DomainNetworkInterface {
|
||||||
mac: guest_mac_string.clone(),
|
mac: zone_mac_string.clone(),
|
||||||
mtu: 1500,
|
mtu: 1500,
|
||||||
bridge: None,
|
bridge: None,
|
||||||
script: None,
|
script: None,
|
||||||
@ -248,20 +248,20 @@ impl GuestLauncher {
|
|||||||
pcis: request.pcis.clone(),
|
pcis: request.pcis.clone(),
|
||||||
filesystems: vec![],
|
filesystems: vec![],
|
||||||
extra_keys,
|
extra_keys,
|
||||||
extra_rw_paths: vec!["krata/guest".to_string()],
|
extra_rw_paths: vec!["krata/zone".to_string()],
|
||||||
};
|
};
|
||||||
match context.xen.create(&config).await {
|
match context.xen.create(&config).await {
|
||||||
Ok(created) => {
|
Ok(created) => {
|
||||||
ip.commit().await?;
|
ip.commit().await?;
|
||||||
Ok(GuestInfo {
|
Ok(ZoneInfo {
|
||||||
name: request.name.as_ref().map(|x| x.to_string()),
|
name: request.name.as_ref().map(|x| x.to_string()),
|
||||||
uuid,
|
uuid,
|
||||||
domid: created.domid,
|
domid: created.domid,
|
||||||
image: request.image.digest,
|
image: request.image.digest,
|
||||||
loops: vec![],
|
loops: vec![],
|
||||||
guest_ipv4: Some(IpNetwork::new(IpAddr::V4(ip.ipv4), ip.ipv4_prefix)?),
|
zone_ipv4: Some(IpNetwork::new(IpAddr::V4(ip.ipv4), ip.ipv4_prefix)?),
|
||||||
guest_ipv6: Some(IpNetwork::new(IpAddr::V6(ip.ipv6), ip.ipv6_prefix)?),
|
zone_ipv6: Some(IpNetwork::new(IpAddr::V6(ip.ipv6), ip.ipv6_prefix)?),
|
||||||
guest_mac: Some(guest_mac_string.clone()),
|
zone_mac: Some(zone_mac_string.clone()),
|
||||||
gateway_ipv4: Some(IpNetwork::new(
|
gateway_ipv4: Some(IpNetwork::new(
|
||||||
IpAddr::V4(ip.gateway_ipv4),
|
IpAddr::V4(ip.gateway_ipv4),
|
||||||
ip.ipv4_prefix,
|
ip.ipv4_prefix,
|
||||||
@ -271,7 +271,7 @@ impl GuestLauncher {
|
|||||||
ip.ipv6_prefix,
|
ip.ipv6_prefix,
|
||||||
)?),
|
)?),
|
||||||
gateway_mac: Some(gateway_mac_string.clone()),
|
gateway_mac: Some(gateway_mac_string.clone()),
|
||||||
state: GuestState { exit_code: None },
|
state: ZoneState { exit_code: None },
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
|
@ -12,7 +12,7 @@ use xenstore::{XsdClient, XsdInterface};
|
|||||||
|
|
||||||
use self::{
|
use self::{
|
||||||
autoloop::AutoLoop,
|
autoloop::AutoLoop,
|
||||||
launch::{GuestLaunchRequest, GuestLauncher},
|
launch::{ZoneLaunchRequest, ZoneLauncher},
|
||||||
power::PowerManagementContext,
|
power::PowerManagementContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -29,29 +29,32 @@ type RuntimePlatform = xenplatform::x86pv::X86PvPlatform;
|
|||||||
#[cfg(not(target_arch = "x86_64"))]
|
#[cfg(not(target_arch = "x86_64"))]
|
||||||
type RuntimePlatform = xenplatform::unsupported::UnsupportedPlatform;
|
type RuntimePlatform = xenplatform::unsupported::UnsupportedPlatform;
|
||||||
|
|
||||||
pub struct GuestLoopInfo {
|
#[derive(Clone)]
|
||||||
|
pub struct ZoneLoopInfo {
|
||||||
pub device: String,
|
pub device: String,
|
||||||
pub file: String,
|
pub file: String,
|
||||||
pub delete: Option<String>,
|
pub delete: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct GuestState {
|
#[derive(Clone)]
|
||||||
|
pub struct ZoneState {
|
||||||
pub exit_code: Option<i32>,
|
pub exit_code: Option<i32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct GuestInfo {
|
#[derive(Clone)]
|
||||||
|
pub struct ZoneInfo {
|
||||||
pub name: Option<String>,
|
pub name: Option<String>,
|
||||||
pub uuid: Uuid,
|
pub uuid: Uuid,
|
||||||
pub domid: u32,
|
pub domid: u32,
|
||||||
pub image: String,
|
pub image: String,
|
||||||
pub loops: Vec<GuestLoopInfo>,
|
pub loops: Vec<ZoneLoopInfo>,
|
||||||
pub guest_ipv4: Option<IpNetwork>,
|
pub zone_ipv4: Option<IpNetwork>,
|
||||||
pub guest_ipv6: Option<IpNetwork>,
|
pub zone_ipv6: Option<IpNetwork>,
|
||||||
pub guest_mac: Option<String>,
|
pub zone_mac: Option<String>,
|
||||||
pub gateway_ipv4: Option<IpNetwork>,
|
pub gateway_ipv4: Option<IpNetwork>,
|
||||||
pub gateway_ipv6: Option<IpNetwork>,
|
pub gateway_ipv6: Option<IpNetwork>,
|
||||||
pub gateway_mac: Option<String>,
|
pub gateway_mac: Option<String>,
|
||||||
pub state: GuestState,
|
pub state: ZoneState,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
@ -75,8 +78,8 @@ impl RuntimeContext {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn list(&self) -> Result<Vec<GuestInfo>> {
|
pub async fn list(&self) -> Result<Vec<ZoneInfo>> {
|
||||||
let mut guests: Vec<GuestInfo> = Vec::new();
|
let mut zones: Vec<ZoneInfo> = Vec::new();
|
||||||
for domid_candidate in self.xen.store.list("/local/domain").await? {
|
for domid_candidate in self.xen.store.list("/local/domain").await? {
|
||||||
if domid_candidate == "0" {
|
if domid_candidate == "0" {
|
||||||
continue;
|
continue;
|
||||||
@ -112,20 +115,20 @@ impl RuntimeContext {
|
|||||||
.store
|
.store
|
||||||
.read_string(&format!("{}/krata/loops", &dom_path))
|
.read_string(&format!("{}/krata/loops", &dom_path))
|
||||||
.await?;
|
.await?;
|
||||||
let guest_ipv4 = self
|
let zone_ipv4 = self
|
||||||
.xen
|
.xen
|
||||||
.store
|
.store
|
||||||
.read_string(&format!("{}/krata/network/guest/ipv4", &dom_path))
|
.read_string(&format!("{}/krata/network/zone/ipv4", &dom_path))
|
||||||
.await?;
|
.await?;
|
||||||
let guest_ipv6 = self
|
let zone_ipv6 = self
|
||||||
.xen
|
.xen
|
||||||
.store
|
.store
|
||||||
.read_string(&format!("{}/krata/network/guest/ipv6", &dom_path))
|
.read_string(&format!("{}/krata/network/zone/ipv6", &dom_path))
|
||||||
.await?;
|
.await?;
|
||||||
let guest_mac = self
|
let zone_mac = self
|
||||||
.xen
|
.xen
|
||||||
.store
|
.store
|
||||||
.read_string(&format!("{}/krata/network/guest/mac", &dom_path))
|
.read_string(&format!("{}/krata/network/zone/mac", &dom_path))
|
||||||
.await?;
|
.await?;
|
||||||
let gateway_ipv4 = self
|
let gateway_ipv4 = self
|
||||||
.xen
|
.xen
|
||||||
@ -143,14 +146,14 @@ impl RuntimeContext {
|
|||||||
.read_string(&format!("{}/krata/network/gateway/mac", &dom_path))
|
.read_string(&format!("{}/krata/network/gateway/mac", &dom_path))
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let guest_ipv4 = if let Some(guest_ipv4) = guest_ipv4 {
|
let zone_ipv4 = if let Some(zone_ipv4) = zone_ipv4 {
|
||||||
IpNetwork::from_str(&guest_ipv4).ok()
|
IpNetwork::from_str(&zone_ipv4).ok()
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let guest_ipv6 = if let Some(guest_ipv6) = guest_ipv6 {
|
let zone_ipv6 = if let Some(zone_ipv6) = zone_ipv6 {
|
||||||
IpNetwork::from_str(&guest_ipv6).ok()
|
IpNetwork::from_str(&zone_ipv6).ok()
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
@ -170,7 +173,7 @@ impl RuntimeContext {
|
|||||||
let exit_code = self
|
let exit_code = self
|
||||||
.xen
|
.xen
|
||||||
.store
|
.store
|
||||||
.read_string(&format!("{}/krata/guest/exit-code", &dom_path))
|
.read_string(&format!("{}/krata/zone/exit-code", &dom_path))
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let exit_code: Option<i32> = match exit_code {
|
let exit_code: Option<i32> = match exit_code {
|
||||||
@ -178,37 +181,37 @@ impl RuntimeContext {
|
|||||||
None => None,
|
None => None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let state = GuestState { exit_code };
|
let state = ZoneState { exit_code };
|
||||||
|
|
||||||
let loops = RuntimeContext::parse_loop_set(&loops);
|
let loops = RuntimeContext::parse_loop_set(&loops);
|
||||||
guests.push(GuestInfo {
|
zones.push(ZoneInfo {
|
||||||
name,
|
name,
|
||||||
uuid,
|
uuid,
|
||||||
domid,
|
domid,
|
||||||
image,
|
image,
|
||||||
loops,
|
loops,
|
||||||
guest_ipv4,
|
zone_ipv4,
|
||||||
guest_ipv6,
|
zone_ipv6,
|
||||||
guest_mac,
|
zone_mac,
|
||||||
gateway_ipv4,
|
gateway_ipv4,
|
||||||
gateway_ipv6,
|
gateway_ipv6,
|
||||||
gateway_mac,
|
gateway_mac,
|
||||||
state,
|
state,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
Ok(guests)
|
Ok(zones)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn resolve(&self, uuid: Uuid) -> Result<Option<GuestInfo>> {
|
pub async fn resolve(&self, uuid: Uuid) -> Result<Option<ZoneInfo>> {
|
||||||
for guest in self.list().await? {
|
for zone in self.list().await? {
|
||||||
if guest.uuid == uuid {
|
if zone.uuid == uuid {
|
||||||
return Ok(Some(guest));
|
return Ok(Some(zone));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(None)
|
Ok(None)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn parse_loop_set(input: &Option<String>) -> Vec<GuestLoopInfo> {
|
fn parse_loop_set(input: &Option<String>) -> Vec<ZoneLoopInfo> {
|
||||||
let Some(input) = input else {
|
let Some(input) = input else {
|
||||||
return Vec::new();
|
return Vec::new();
|
||||||
};
|
};
|
||||||
@ -219,7 +222,7 @@ impl RuntimeContext {
|
|||||||
.map(|x| (x[0].clone(), x[1].clone(), x[2].clone()))
|
.map(|x| (x[0].clone(), x[1].clone(), x[2].clone()))
|
||||||
.collect::<Vec<(String, String, String)>>();
|
.collect::<Vec<(String, String, String)>>();
|
||||||
sets.iter()
|
sets.iter()
|
||||||
.map(|(device, file, delete)| GuestLoopInfo {
|
.map(|(device, file, delete)| ZoneLoopInfo {
|
||||||
device: device.clone(),
|
device: device.clone(),
|
||||||
file: file.clone(),
|
file: file.clone(),
|
||||||
delete: if delete == "none" {
|
delete: if delete == "none" {
|
||||||
@ -228,7 +231,7 @@ impl RuntimeContext {
|
|||||||
Some(delete.clone())
|
Some(delete.clone())
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
.collect::<Vec<GuestLoopInfo>>()
|
.collect::<Vec<ZoneLoopInfo>>()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -249,8 +252,8 @@ impl Runtime {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn launch(&self, request: GuestLaunchRequest) -> Result<GuestInfo> {
|
pub async fn launch(&self, request: ZoneLaunchRequest) -> Result<ZoneInfo> {
|
||||||
let mut launcher = GuestLauncher::new(self.launch_semaphore.clone())?;
|
let mut launcher = ZoneLauncher::new(self.launch_semaphore.clone())?;
|
||||||
launcher.launch(&self.context, request).await
|
launcher.launch(&self.context, request).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -259,7 +262,7 @@ impl Runtime {
|
|||||||
.context
|
.context
|
||||||
.resolve(uuid)
|
.resolve(uuid)
|
||||||
.await?
|
.await?
|
||||||
.ok_or_else(|| anyhow!("unable to resolve guest: {}", uuid))?;
|
.ok_or_else(|| anyhow!("unable to resolve zone: {}", uuid))?;
|
||||||
let domid = info.domid;
|
let domid = info.domid;
|
||||||
let store = XsdClient::open().await?;
|
let store = XsdClient::open().await?;
|
||||||
let dom_path = store.get_domain_path(domid).await?;
|
let dom_path = store.get_domain_path(domid).await?;
|
||||||
@ -307,7 +310,7 @@ impl Runtime {
|
|||||||
if let Some(ip) = ip {
|
if let Some(ip) = ip {
|
||||||
if let Err(error) = self.context.ipvendor.recall(&ip).await {
|
if let Err(error) = self.context.ipvendor.recall(&ip).await {
|
||||||
error!(
|
error!(
|
||||||
"failed to recall ip assignment for guest {}: {}",
|
"failed to recall ip assignment for zone {}: {}",
|
||||||
uuid, error
|
uuid, error
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@ -316,7 +319,7 @@ impl Runtime {
|
|||||||
Ok(uuid)
|
Ok(uuid)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn list(&self) -> Result<Vec<GuestInfo>> {
|
pub async fn list(&self) -> Result<Vec<ZoneInfo>> {
|
||||||
self.context.list().await
|
self.context.list().await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,7 +25,7 @@ pub struct CpuTopologyInfo {
|
|||||||
pub class: CpuClass,
|
pub class: CpuClass,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn labelled_topo(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
|
fn labeled_topology(input: &[SysctlCputopo]) -> Vec<CpuTopologyInfo> {
|
||||||
let mut cores: IndexMap<(u32, u32, u32), Vec<CpuTopologyInfo>> = IndexMap::new();
|
let mut cores: IndexMap<(u32, u32, u32), Vec<CpuTopologyInfo>> = IndexMap::new();
|
||||||
let mut pe_cores = false;
|
let mut pe_cores = false;
|
||||||
let mut last: Option<SysctlCputopo> = None;
|
let mut last: Option<SysctlCputopo> = None;
|
||||||
@ -140,9 +140,9 @@ impl PowerManagementContext {
|
|||||||
/// If there is a p-core/e-core split, then CPU class will be defined as
|
/// If there is a p-core/e-core split, then CPU class will be defined as
|
||||||
/// `CpuClass::Performance` or `CpuClass::Efficiency`, else `CpuClass::Standard`.
|
/// `CpuClass::Performance` or `CpuClass::Efficiency`, else `CpuClass::Standard`.
|
||||||
pub async fn cpu_topology(&self) -> Result<Vec<CpuTopologyInfo>> {
|
pub async fn cpu_topology(&self) -> Result<Vec<CpuTopologyInfo>> {
|
||||||
let xentopo = self.context.xen.call.cpu_topology().await?;
|
let xen_topology = self.context.xen.call.cpu_topology().await?;
|
||||||
let logicaltopo = labelled_topo(&xentopo);
|
let logical_topology = labeled_topology(&xen_topology);
|
||||||
Ok(logicaltopo)
|
Ok(logical_topology)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Enable or disable SMT awareness in the scheduler.
|
/// Enable or disable SMT awareness in the scheduler.
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "krata-guest"
|
name = "krata-zone"
|
||||||
description = "Guest services for the krata isolation engine"
|
description = "zone services for the krata isolation engine"
|
||||||
license.workspace = true
|
license.workspace = true
|
||||||
version.workspace = true
|
version.workspace = true
|
||||||
homepage.workspace = true
|
homepage.workspace = true
|
||||||
@ -30,8 +30,8 @@ sysinfo = { workspace = true }
|
|||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "krataguest"
|
name = "kratazone"
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
name = "krataguest"
|
name = "krata-zone"
|
||||||
path = "bin/init.rs"
|
path = "bin/init.rs"
|
19
crates/zone/bin/init.rs
Normal file
19
crates/zone/bin/init.rs
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
use anyhow::Result;
|
||||||
|
use env_logger::Env;
|
||||||
|
use kratazone::{death, init::ZoneInit};
|
||||||
|
use log::error;
|
||||||
|
use std::env;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<()> {
|
||||||
|
env::set_var("RUST_BACKTRACE", "1");
|
||||||
|
env_logger::Builder::from_env(Env::default().default_filter_or("warn")).init();
|
||||||
|
let mut zone = ZoneInit::new();
|
||||||
|
if let Err(error) = zone.init().await {
|
||||||
|
error!("failed to initialize zone: {}", error);
|
||||||
|
death(127).await?;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
death(1).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
@ -1,7 +1,7 @@
|
|||||||
use crate::{
|
use crate::{
|
||||||
childwait::{ChildEvent, ChildWait},
|
childwait::{ChildEvent, ChildWait},
|
||||||
death,
|
death,
|
||||||
exec::GuestExecTask,
|
exec::ZoneExecTask,
|
||||||
metrics::MetricsCollector,
|
metrics::MetricsCollector,
|
||||||
};
|
};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
@ -18,20 +18,16 @@ use log::debug;
|
|||||||
use nix::unistd::Pid;
|
use nix::unistd::Pid;
|
||||||
use tokio::{select, sync::broadcast};
|
use tokio::{select, sync::broadcast};
|
||||||
|
|
||||||
pub struct GuestBackground {
|
pub struct ZoneBackground {
|
||||||
idm: IdmInternalClient,
|
idm: IdmInternalClient,
|
||||||
child: Pid,
|
child: Pid,
|
||||||
_cgroup: Cgroup,
|
_cgroup: Cgroup,
|
||||||
wait: ChildWait,
|
wait: ChildWait,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestBackground {
|
impl ZoneBackground {
|
||||||
pub async fn new(
|
pub async fn new(idm: IdmInternalClient, cgroup: Cgroup, child: Pid) -> Result<ZoneBackground> {
|
||||||
idm: IdmInternalClient,
|
Ok(ZoneBackground {
|
||||||
cgroup: Cgroup,
|
|
||||||
child: Pid,
|
|
||||||
) -> Result<GuestBackground> {
|
|
||||||
Ok(GuestBackground {
|
|
||||||
idm,
|
idm,
|
||||||
child,
|
child,
|
||||||
_cgroup: cgroup,
|
_cgroup: cgroup,
|
||||||
@ -134,7 +130,7 @@ impl GuestBackground {
|
|||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
if let Some(RequestType::ExecStream(_)) = &handle.initial.request {
|
if let Some(RequestType::ExecStream(_)) = &handle.initial.request {
|
||||||
tokio::task::spawn(async move {
|
tokio::task::spawn(async move {
|
||||||
let exec = GuestExecTask { handle };
|
let exec = ZoneExecTask { handle };
|
||||||
if let Err(error) = exec.run().await {
|
if let Err(error) = exec.run().await {
|
||||||
let _ = exec
|
let _ = exec
|
||||||
.handle
|
.handle
|
@ -15,11 +15,11 @@ use tokio::{
|
|||||||
process::Command,
|
process::Command,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub struct GuestExecTask {
|
pub struct ZoneExecTask {
|
||||||
pub handle: IdmClientStreamResponseHandle<Request>,
|
pub handle: IdmClientStreamResponseHandle<Request>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestExecTask {
|
impl ZoneExecTask {
|
||||||
pub async fn run(&self) -> Result<()> {
|
pub async fn run(&self) -> Result<()> {
|
||||||
let mut receiver = self.handle.take().await?;
|
let mut receiver = self.handle.take().await?;
|
||||||
|
|
@ -26,7 +26,7 @@ use std::str::FromStr;
|
|||||||
use sys_mount::{FilesystemType, Mount, MountFlags};
|
use sys_mount::{FilesystemType, Mount, MountFlags};
|
||||||
use tokio::fs;
|
use tokio::fs;
|
||||||
|
|
||||||
use crate::background::GuestBackground;
|
use crate::background::ZoneBackground;
|
||||||
|
|
||||||
const IMAGE_BLOCK_DEVICE_PATH: &str = "/dev/xvda";
|
const IMAGE_BLOCK_DEVICE_PATH: &str = "/dev/xvda";
|
||||||
const CONFIG_BLOCK_DEVICE_PATH: &str = "/dev/xvdb";
|
const CONFIG_BLOCK_DEVICE_PATH: &str = "/dev/xvdb";
|
||||||
@ -57,17 +57,17 @@ const ADDONS_MODULES_PATH: &str = "/addons/modules";
|
|||||||
|
|
||||||
ioctl_write_int_bad!(set_controlling_terminal, TIOCSCTTY);
|
ioctl_write_int_bad!(set_controlling_terminal, TIOCSCTTY);
|
||||||
|
|
||||||
pub struct GuestInit {}
|
pub struct ZoneInit {}
|
||||||
|
|
||||||
impl Default for GuestInit {
|
impl Default for ZoneInit {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self::new()
|
Self::new()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl GuestInit {
|
impl ZoneInit {
|
||||||
pub fn new() -> GuestInit {
|
pub fn new() -> ZoneInit {
|
||||||
GuestInit {}
|
ZoneInit {}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn init(&mut self) -> Result<()> {
|
pub async fn init(&mut self) -> Result<()> {
|
||||||
@ -127,7 +127,7 @@ impl GuestInit {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if let Some(cfg) = config.config() {
|
if let Some(cfg) = config.config() {
|
||||||
trace!("running guest task");
|
trace!("running zone task");
|
||||||
self.run(cfg, &launch, idm).await?;
|
self.run(cfg, &launch, idm).await?;
|
||||||
} else {
|
} else {
|
||||||
return Err(anyhow!(
|
return Err(anyhow!(
|
||||||
@ -521,7 +521,7 @@ impl GuestInit {
|
|||||||
|
|
||||||
let mut env = HashMap::new();
|
let mut env = HashMap::new();
|
||||||
if let Some(config_env) = config.env() {
|
if let Some(config_env) = config.env() {
|
||||||
env.extend(GuestInit::env_map(config_env));
|
env.extend(ZoneInit::env_map(config_env));
|
||||||
}
|
}
|
||||||
env.extend(launch.env.clone());
|
env.extend(launch.env.clone());
|
||||||
env.insert("KRATA_CONTAINER".to_string(), "1".to_string());
|
env.insert("KRATA_CONTAINER".to_string(), "1".to_string());
|
||||||
@ -540,13 +540,13 @@ impl GuestInit {
|
|||||||
return Err(anyhow!("cannot get file name of command path as str"));
|
return Err(anyhow!("cannot get file name of command path as str"));
|
||||||
};
|
};
|
||||||
cmd.insert(0, file_name.to_string());
|
cmd.insert(0, file_name.to_string());
|
||||||
let env = GuestInit::env_list(env);
|
let env = ZoneInit::env_list(env);
|
||||||
|
|
||||||
trace!("running guest command: {}", cmd.join(" "));
|
trace!("running zone command: {}", cmd.join(" "));
|
||||||
|
|
||||||
let path = CString::new(path.as_os_str().as_bytes())?;
|
let path = CString::new(path.as_os_str().as_bytes())?;
|
||||||
let cmd = GuestInit::strings_as_cstrings(cmd)?;
|
let cmd = ZoneInit::strings_as_cstrings(cmd)?;
|
||||||
let env = GuestInit::strings_as_cstrings(env)?;
|
let env = ZoneInit::strings_as_cstrings(env)?;
|
||||||
let mut working_dir = config
|
let mut working_dir = config
|
||||||
.working_dir()
|
.working_dir()
|
||||||
.as_ref()
|
.as_ref()
|
||||||
@ -566,7 +566,7 @@ impl GuestInit {
|
|||||||
async fn init_cgroup(&self) -> Result<Cgroup> {
|
async fn init_cgroup(&self) -> Result<Cgroup> {
|
||||||
trace!("initializing cgroup");
|
trace!("initializing cgroup");
|
||||||
let hierarchy = cgroups_rs::hierarchies::auto();
|
let hierarchy = cgroups_rs::hierarchies::auto();
|
||||||
let cgroup = Cgroup::new(hierarchy, "krata-guest-task")?;
|
let cgroup = Cgroup::new(hierarchy, "krata-zone-task")?;
|
||||||
cgroup.set_cgroup_type("threaded")?;
|
cgroup.set_cgroup_type("threaded")?;
|
||||||
trace!("initialized cgroup");
|
trace!("initialized cgroup");
|
||||||
Ok(cgroup)
|
Ok(cgroup)
|
||||||
@ -619,7 +619,7 @@ impl GuestInit {
|
|||||||
cmd: Vec<CString>,
|
cmd: Vec<CString>,
|
||||||
env: Vec<CString>,
|
env: Vec<CString>,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
GuestInit::set_controlling_terminal()?;
|
ZoneInit::set_controlling_terminal()?;
|
||||||
std::env::set_current_dir(working_dir)?;
|
std::env::set_current_dir(working_dir)?;
|
||||||
cgroup.add_task(CgroupPid::from(std::process::id() as u64))?;
|
cgroup.add_task(CgroupPid::from(std::process::id() as u64))?;
|
||||||
execve(&path, &cmd, &env)?;
|
execve(&path, &cmd, &env)?;
|
||||||
@ -640,7 +640,7 @@ impl GuestInit {
|
|||||||
cgroup: Cgroup,
|
cgroup: Cgroup,
|
||||||
executed: Pid,
|
executed: Pid,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut background = GuestBackground::new(idm, cgroup, executed).await?;
|
let mut background = ZoneBackground::new(idm, cgroup, executed).await?;
|
||||||
background.run().await?;
|
background.run().await?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
@ -13,7 +13,7 @@ pub mod metrics;
|
|||||||
pub async fn death(code: c_int) -> Result<()> {
|
pub async fn death(code: c_int) -> Result<()> {
|
||||||
let store = XsdClient::open().await?;
|
let store = XsdClient::open().await?;
|
||||||
store
|
store
|
||||||
.write_string("krata/guest/exit-code", &code.to_string())
|
.write_string("krata/zone/exit-code", &code.to_string())
|
||||||
.await?;
|
.await?;
|
||||||
drop(store);
|
drop(store);
|
||||||
loop {
|
loop {
|
@ -14,7 +14,7 @@ impl MetricsCollector {
|
|||||||
pub fn collect(&self) -> Result<MetricNode> {
|
pub fn collect(&self) -> Result<MetricNode> {
|
||||||
let mut sysinfo = sysinfo::System::new();
|
let mut sysinfo = sysinfo::System::new();
|
||||||
Ok(MetricNode::structural(
|
Ok(MetricNode::structural(
|
||||||
"guest",
|
"zone",
|
||||||
vec![
|
vec![
|
||||||
self.collect_system(&mut sysinfo)?,
|
self.collect_system(&mut sysinfo)?,
|
||||||
self.collect_processes(&mut sysinfo)?,
|
self.collect_processes(&mut sysinfo)?,
|
@ -19,12 +19,12 @@ fi
|
|||||||
build_and_run() {
|
build_and_run() {
|
||||||
EXE_TARGET="${1}"
|
EXE_TARGET="${1}"
|
||||||
shift
|
shift
|
||||||
sudo mkdir -p /var/lib/krata/guest
|
sudo mkdir -p /var/lib/krata/zone
|
||||||
if [ "${KRATA_BUILD_INITRD}" = "1" ]
|
if [ "${KRATA_BUILD_INITRD}" = "1" ]
|
||||||
then
|
then
|
||||||
TARGET_ARCH="$(./hack/build/arch.sh)"
|
TARGET_ARCH="$(./hack/build/arch.sh)"
|
||||||
./hack/initrd/build.sh ${CARGO_BUILD_FLAGS}
|
./hack/initrd/build.sh ${CARGO_BUILD_FLAGS}
|
||||||
sudo cp "target/initrd/initrd-${TARGET_ARCH}" "/var/lib/krata/guest/initrd"
|
sudo cp "target/initrd/initrd-${TARGET_ARCH}" "/var/lib/krata/zone/initrd"
|
||||||
fi
|
fi
|
||||||
RUST_TARGET="$(./hack/build/target.sh)"
|
RUST_TARGET="$(./hack/build/target.sh)"
|
||||||
./hack/build/cargo.sh build ${CARGO_BUILD_FLAGS} --bin "${EXE_TARGET}"
|
./hack/build/cargo.sh build ${CARGO_BUILD_FLAGS} --bin "${EXE_TARGET}"
|
||||||
|
6
hack/dist/systar.sh
vendored
6
hack/dist/systar.sh
vendored
@ -38,9 +38,9 @@ else
|
|||||||
mv ../krata/kratad.service ../krata/kratanet.service usr/lib/systemd/system/
|
mv ../krata/kratad.service ../krata/kratanet.service usr/lib/systemd/system/
|
||||||
fi
|
fi
|
||||||
|
|
||||||
mkdir -p usr/share/krata/guest
|
mkdir -p usr/share/krata/zone
|
||||||
mv ../krata/kernel ../krata/initrd usr/share/krata/guest
|
mv ../krata/kernel ../krata/initrd usr/share/krata/zone
|
||||||
mv ../krata/addons.squashfs usr/share/krata/guest/addons.squashfs
|
mv ../krata/addons.squashfs usr/share/krata/zone/addons.squashfs
|
||||||
|
|
||||||
tar czf "${SYSTAR}" --owner 0 --group 0 .
|
tar czf "${SYSTAR}" --owner 0 --group 0 .
|
||||||
|
|
||||||
|
@ -12,9 +12,9 @@ export TARGET_LIBC="musl"
|
|||||||
RUST_TARGET="$(./hack/build/target.sh)"
|
RUST_TARGET="$(./hack/build/target.sh)"
|
||||||
export RUSTFLAGS="-Ctarget-feature=+crt-static"
|
export RUSTFLAGS="-Ctarget-feature=+crt-static"
|
||||||
|
|
||||||
./hack/build/cargo.sh build "${@}" --release --bin krataguest
|
./hack/build/cargo.sh build "${@}" --release --bin krata-zone
|
||||||
INITRD_DIR="$(mktemp -d /tmp/krata-initrd.XXXXXXXXXXXXX)"
|
INITRD_DIR="$(mktemp -d /tmp/krata-initrd.XXXXXXXXXXXXX)"
|
||||||
cp "target/${RUST_TARGET}/release/krataguest" "${INITRD_DIR}/init"
|
cp "target/${RUST_TARGET}/release/krata-zone" "${INITRD_DIR}/init"
|
||||||
chmod +x "${INITRD_DIR}/init"
|
chmod +x "${INITRD_DIR}/init"
|
||||||
cd "${INITRD_DIR}"
|
cd "${INITRD_DIR}"
|
||||||
mkdir -p "${KRATA_DIR}/target/initrd"
|
mkdir -p "${KRATA_DIR}/target/initrd"
|
||||||
|
@ -14,7 +14,7 @@ changelog_path = "./CHANGELOG.md"
|
|||||||
changelog_include = [
|
changelog_include = [
|
||||||
"krata-daemon",
|
"krata-daemon",
|
||||||
"krata-ctl",
|
"krata-ctl",
|
||||||
"krata-guest",
|
"krata-zone",
|
||||||
"krata-network",
|
"krata-network",
|
||||||
"krata-runtime",
|
"krata-runtime",
|
||||||
"krata-oci",
|
"krata-oci",
|
||||||
|
@ -30,9 +30,9 @@ chmod +x /usr/sbin/kratad
|
|||||||
chmod +x /usr/sbin/kratanet
|
chmod +x /usr/sbin/kratanet
|
||||||
chmod +x /usr/bin/kratactl
|
chmod +x /usr/bin/kratactl
|
||||||
|
|
||||||
mkdir -p /var/lib/krata /usr/share/krata/guest
|
mkdir -p /var/lib/krata /usr/share/krata/zone
|
||||||
cp kernel /usr/share/krata/guest/kernel
|
cp kernel /usr/share/krata/zone/kernel
|
||||||
cp initrd /usr/share/krata/guest/initrd
|
cp initrd /usr/share/krata/zone/initrd
|
||||||
|
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
systemctl enable kratad.service kratanet.service
|
systemctl enable kratad.service kratanet.service
|
||||||
|
Reference in New Issue
Block a user