feature(krata): rename guest to zone (#266)
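
This change renames the user-facing "guest" concept to "zone" across the daemon:
the protobuf common types (Guest, GuestState, GuestStatus become Zone, ZoneState,
ZoneStatus), the control service RPCs (create_guest, destroy_guest, list_guests,
resolve_guest, exec_guest, read_guest_metrics, and console_data become create_zone,
destroy_zone, list_zones, resolve_zone, exec_zone, read_zone_metrics, and
attach_zone_console), the persistent store (GuestStore over guests.db becomes
ZoneStore over zones.db), the reconciler (reconcile::guest becomes reconcile::zone),
the domid/uuid lookup table (glt::GuestLookupTable becomes zlt::ZoneLookupTable),
and the zone asset paths ({store}/guest/... and /usr/share/krata/guest/... become
{store}/zone/... and /usr/share/krata/zone/...).

API consumers need the same rename. A minimal sketch of the new call shape, assuming
the usual tonic-generated client module (control_service_client), a hypothetical TCP
endpoint, and a default zone spec; none of these specifics appear in this diff:

    use anyhow::Result;
    use krata::v1::control::{
        control_service_client::ControlServiceClient, CreateZoneRequest, ListZonesRequest,
    };

    #[tokio::main]
    async fn main() -> Result<()> {
        // Hypothetical endpoint; adjust to however kratad is exposed in your setup.
        let mut client = ControlServiceClient::connect("http://127.0.0.1:4350").await?;

        // create_guest(CreateGuestRequest { spec }) is now create_zone(CreateZoneRequest { spec }),
        // and the reply field guest_id is now zone_id.
        let created = client
            .create_zone(CreateZoneRequest { spec: Some(Default::default()) })
            .await?
            .into_inner();
        println!("created zone {}", created.zone_id);

        // list_guests is now list_zones, and the reply carries `zones` instead of `guests`.
        let zones = client.list_zones(ListZonesRequest {}).await?.into_inner().zones;
        println!("{} zones running", zones.len());
        Ok(())
    }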
@@ -13,7 +13,7 @@ use tokio::{
 };
 use uuid::Uuid;
 
-use crate::glt::GuestLookupTable;
+use crate::zlt::ZoneLookupTable;
 
 const CONSOLE_BUFFER_SIZE: usize = 1024 * 1024;
 type RawConsoleBuffer = CircularBuffer<CONSOLE_BUFFER_SIZE, u8>;
@@ -24,7 +24,7 @@ type BufferMap = Arc<Mutex<HashMap<u32, ConsoleBuffer>>>;
 
 #[derive(Clone)]
 pub struct DaemonConsoleHandle {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     listeners: ListenerMap,
     buffers: BufferMap,
     sender: Sender<(u32, Vec<u8>)>,
@@ -84,7 +84,7 @@ impl Drop for DaemonConsoleHandle {
 }
 
 pub struct DaemonConsole {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     listeners: ListenerMap,
     buffers: BufferMap,
     receiver: Receiver<(u32, Option<Vec<u8>>)>,
@@ -93,7 +93,7 @@ pub struct DaemonConsole {
 }
 
 impl DaemonConsole {
-    pub async fn new(glt: GuestLookupTable) -> Result<DaemonConsole> {
+    pub async fn new(glt: ZoneLookupTable) -> Result<DaemonConsole> {
         let (service, sender, receiver) =
             ChannelService::new("krata-console".to_string(), Some(0)).await?;
         let task = service.launch().await?;

@@ -7,16 +7,16 @@ use krata::{
         ExecStreamRequestStdin, ExecStreamRequestUpdate, MetricsRequest, Request as IdmRequest,
     },
     v1::{
-        common::{Guest, GuestState, GuestStatus, OciImageFormat},
+        common::{OciImageFormat, Zone, ZoneState, ZoneStatus},
         control::{
-            control_service_server::ControlService, ConsoleDataReply, ConsoleDataRequest,
-            CreateGuestReply, CreateGuestRequest, DestroyGuestReply, DestroyGuestRequest,
-            DeviceInfo, ExecGuestReply, ExecGuestRequest, HostCpuTopologyInfo,
-            HostCpuTopologyReply, HostCpuTopologyRequest, HostPowerManagementPolicy,
-            IdentifyHostReply, IdentifyHostRequest, ListDevicesReply, ListDevicesRequest,
-            ListGuestsReply, ListGuestsRequest, PullImageReply, PullImageRequest,
-            ReadGuestMetricsReply, ReadGuestMetricsRequest, ResolveGuestReply, ResolveGuestRequest,
-            SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest,
+            control_service_server::ControlService, CreateZoneReply, CreateZoneRequest,
+            DestroyZoneReply, DestroyZoneRequest, DeviceInfo, ExecZoneReply, ExecZoneRequest,
+            HostCpuTopologyInfo, HostCpuTopologyReply, HostCpuTopologyRequest,
+            HostPowerManagementPolicy, IdentifyHostReply, IdentifyHostRequest, ListDevicesReply,
+            ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
+            ReadZoneMetricsReply, ReadZoneMetricsRequest, ResolveZoneReply, ResolveZoneRequest,
+            SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest, ZoneConsoleReply,
+            ZoneConsoleRequest,
         },
     },
 };
@@ -37,9 +37,9 @@ use tonic::{Request, Response, Status, Streaming};
 use uuid::Uuid;
 
 use crate::{
-    command::DaemonCommand, console::DaemonConsoleHandle, db::GuestStore,
-    devices::DaemonDeviceManager, event::DaemonEventContext, glt::GuestLookupTable,
-    idm::DaemonIdmHandle, metrics::idm_metric_to_api, oci::convert_oci_progress,
+    command::DaemonCommand, console::DaemonConsoleHandle, db::ZoneStore,
+    devices::DaemonDeviceManager, event::DaemonEventContext, idm::DaemonIdmHandle,
+    metrics::idm_metric_to_api, oci::convert_oci_progress, zlt::ZoneLookupTable,
 };
 
 pub struct ApiError {
@@ -62,13 +62,13 @@ impl From<ApiError> for Status {
 
 #[derive(Clone)]
 pub struct DaemonControlService {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     devices: DaemonDeviceManager,
     events: DaemonEventContext,
     console: DaemonConsoleHandle,
     idm: DaemonIdmHandle,
-    guests: GuestStore,
-    guest_reconciler_notify: Sender<Uuid>,
+    zones: ZoneStore,
+    zone_reconciler_notify: Sender<Uuid>,
     packer: OciPackerService,
     runtime: Runtime,
 }
@@ -76,13 +76,13 @@ pub struct DaemonControlService {
 impl DaemonControlService {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
-        glt: GuestLookupTable,
+        glt: ZoneLookupTable,
         devices: DaemonDeviceManager,
         events: DaemonEventContext,
         console: DaemonConsoleHandle,
         idm: DaemonIdmHandle,
-        guests: GuestStore,
-        guest_reconciler_notify: Sender<Uuid>,
+        zones: ZoneStore,
+        zone_reconciler_notify: Sender<Uuid>,
        packer: OciPackerService,
        runtime: Runtime,
    ) -> Self {
@@ -92,8 +92,8 @@ impl DaemonControlService {
             events,
             console,
             idm,
-            guests,
-            guest_reconciler_notify,
+            zones,
+            zone_reconciler_notify,
             packer,
             runtime,
         }
@@ -102,7 +102,7 @@ impl DaemonControlService {
 
 enum ConsoleDataSelect {
     Read(Option<Vec<u8>>),
-    Write(Option<Result<ConsoleDataRequest, tonic::Status>>),
+    Write(Option<Result<ZoneConsoleRequest, Status>>),
 }
 
 enum PullImageSelect {
@@ -112,11 +112,11 @@ enum PullImageSelect {
 
 #[tonic::async_trait]
 impl ControlService for DaemonControlService {
-    type ExecGuestStream =
-        Pin<Box<dyn Stream<Item = Result<ExecGuestReply, Status>> + Send + 'static>>;
+    type ExecZoneStream =
+        Pin<Box<dyn Stream<Item = Result<ExecZoneReply, Status>> + Send + 'static>>;
 
-    type ConsoleDataStream =
-        Pin<Box<dyn Stream<Item = Result<ConsoleDataReply, Status>> + Send + 'static>>;
+    type AttachZoneConsoleStream =
+        Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;
 
     type PullImageStream =
         Pin<Box<dyn Stream<Item = Result<PullImageReply, Status>> + Send + 'static>>;
@@ -139,25 +139,25 @@ impl ControlService for DaemonControlService {
         }))
     }
 
-    async fn create_guest(
+    async fn create_zone(
         &self,
-        request: Request<CreateGuestRequest>,
-    ) -> Result<Response<CreateGuestReply>, Status> {
+        request: Request<CreateZoneRequest>,
+    ) -> Result<Response<CreateZoneReply>, Status> {
         let request = request.into_inner();
         let Some(spec) = request.spec else {
             return Err(ApiError {
-                message: "guest spec not provided".to_string(),
+                message: "zone spec not provided".to_string(),
             }
             .into());
         };
         let uuid = Uuid::new_v4();
-        self.guests
+        self.zones
             .update(
                 uuid,
-                Guest {
+                Zone {
                     id: uuid.to_string(),
-                    state: Some(GuestState {
-                        status: GuestStatus::Starting.into(),
+                    state: Some(ZoneState {
+                        status: ZoneStatus::Starting.into(),
                         network: None,
                         exit_info: None,
                         error_info: None,
@@ -169,21 +169,21 @@ impl ControlService for DaemonControlService {
             )
             .await
             .map_err(ApiError::from)?;
-        self.guest_reconciler_notify
+        self.zone_reconciler_notify
             .send(uuid)
             .await
             .map_err(|x| ApiError {
                 message: x.to_string(),
             })?;
-        Ok(Response::new(CreateGuestReply {
-            guest_id: uuid.to_string(),
+        Ok(Response::new(CreateZoneReply {
+            zone_id: uuid.to_string(),
         }))
     }
 
-    async fn exec_guest(
+    async fn exec_zone(
         &self,
-        request: Request<Streaming<ExecGuestRequest>>,
-    ) -> Result<Response<Self::ExecGuestStream>, Status> {
+        request: Request<Streaming<ExecZoneRequest>>,
+    ) -> Result<Response<Self::ExecZoneStream>, Status> {
         let mut input = request.into_inner();
         let Some(request) = input.next().await else {
             return Err(ApiError {
@@ -200,7 +200,7 @@ impl ControlService for DaemonControlService {
             .into());
         };
 
-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
         let idm = self.idm.client(uuid).await.map_err(|error| ApiError {
@@ -232,7 +232,7 @@ impl ControlService for DaemonControlService {
         loop {
             select! {
                 x = input.next() => if let Some(update) = x {
-                    let update: Result<ExecGuestRequest, Status> = update.map_err(|error| ApiError {
+                    let update: Result<ExecZoneRequest, Status> = update.map_err(|error| ApiError {
                         message: error.to_string()
                     }.into());
 
@@ -252,7 +252,7 @@ impl ControlService for DaemonControlService {
                     let Some(IdmResponseType::ExecStream(update)) = response.response else {
                         break;
                     };
-                    let reply = ExecGuestReply {
+                    let reply = ExecZoneReply {
                         exited: update.exited,
                         error: update.error,
                         exit_code: update.exit_code,
@@ -269,80 +269,80 @@ impl ControlService for DaemonControlService {
             }
         };
 
-        Ok(Response::new(Box::pin(output) as Self::ExecGuestStream))
+        Ok(Response::new(Box::pin(output) as Self::ExecZoneStream))
     }
 
-    async fn destroy_guest(
+    async fn destroy_zone(
         &self,
-        request: Request<DestroyGuestRequest>,
-    ) -> Result<Response<DestroyGuestReply>, Status> {
+        request: Request<DestroyZoneRequest>,
+    ) -> Result<Response<DestroyZoneReply>, Status> {
         let request = request.into_inner();
-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
-        let Some(mut guest) = self.guests.read(uuid).await.map_err(ApiError::from)? else {
+        let Some(mut zone) = self.zones.read(uuid).await.map_err(ApiError::from)? else {
             return Err(ApiError {
-                message: "guest not found".to_string(),
+                message: "zone not found".to_string(),
             }
             .into());
         };
 
-        guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
+        zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
 
-        if guest.state.as_ref().unwrap().status() == GuestStatus::Destroyed {
+        if zone.state.as_ref().unwrap().status() == ZoneStatus::Destroyed {
             return Err(ApiError {
-                message: "guest already destroyed".to_string(),
+                message: "zone already destroyed".to_string(),
             }
             .into());
         }
 
-        guest.state.as_mut().unwrap().status = GuestStatus::Destroying.into();
-        self.guests
-            .update(uuid, guest)
+        zone.state.as_mut().unwrap().status = ZoneStatus::Destroying.into();
+        self.zones
+            .update(uuid, zone)
             .await
             .map_err(ApiError::from)?;
-        self.guest_reconciler_notify
+        self.zone_reconciler_notify
             .send(uuid)
             .await
             .map_err(|x| ApiError {
                 message: x.to_string(),
             })?;
-        Ok(Response::new(DestroyGuestReply {}))
+        Ok(Response::new(DestroyZoneReply {}))
     }
 
-    async fn list_guests(
+    async fn list_zones(
         &self,
-        request: Request<ListGuestsRequest>,
-    ) -> Result<Response<ListGuestsReply>, Status> {
+        request: Request<ListZonesRequest>,
+    ) -> Result<Response<ListZonesReply>, Status> {
         let _ = request.into_inner();
-        let guests = self.guests.list().await.map_err(ApiError::from)?;
-        let guests = guests.into_values().collect::<Vec<Guest>>();
-        Ok(Response::new(ListGuestsReply { guests }))
+        let zones = self.zones.list().await.map_err(ApiError::from)?;
+        let zones = zones.into_values().collect::<Vec<Zone>>();
+        Ok(Response::new(ListZonesReply { zones }))
     }
 
-    async fn resolve_guest(
+    async fn resolve_zone(
         &self,
-        request: Request<ResolveGuestRequest>,
-    ) -> Result<Response<ResolveGuestReply>, Status> {
+        request: Request<ResolveZoneRequest>,
+    ) -> Result<Response<ResolveZoneReply>, Status> {
         let request = request.into_inner();
-        let guests = self.guests.list().await.map_err(ApiError::from)?;
-        let guests = guests
+        let zones = self.zones.list().await.map_err(ApiError::from)?;
+        let zones = zones
             .into_values()
             .filter(|x| {
                 let comparison_spec = x.spec.as_ref().cloned().unwrap_or_default();
                 (!request.name.is_empty() && comparison_spec.name == request.name)
                     || x.id == request.name
             })
-            .collect::<Vec<Guest>>();
-        Ok(Response::new(ResolveGuestReply {
-            guest: guests.first().cloned(),
+            .collect::<Vec<Zone>>();
+        Ok(Response::new(ResolveZoneReply {
+            zone: zones.first().cloned(),
         }))
     }
 
-    async fn console_data(
+    async fn attach_zone_console(
         &self,
-        request: Request<Streaming<ConsoleDataRequest>>,
-    ) -> Result<Response<Self::ConsoleDataStream>, Status> {
+        request: Request<Streaming<ZoneConsoleRequest>>,
+    ) -> Result<Response<Self::AttachZoneConsoleStream>, Status> {
         let mut input = request.into_inner();
         let Some(request) = input.next().await else {
             return Err(ApiError {
@@ -351,7 +351,7 @@ impl ControlService for DaemonControlService {
             .into());
         };
         let request = request?;
-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
         let (sender, mut receiver) = channel(100);
@@ -364,7 +364,7 @@ impl ControlService for DaemonControlService {
         })?;
 
         let output = try_stream! {
-            yield ConsoleDataReply { data: console.initial.clone(), };
+            yield ZoneConsoleReply { data: console.initial.clone(), };
             loop {
                 let what = select! {
                     x = receiver.recv() => ConsoleDataSelect::Read(x),
@@ -373,7 +373,7 @@ impl ControlService for DaemonControlService {
 
             match what {
                 ConsoleDataSelect::Read(Some(data)) => {
-                    yield ConsoleDataReply { data, };
+                    yield ZoneConsoleReply { data, };
                 },
 
                 ConsoleDataSelect::Read(None) => {
@@ -396,15 +396,17 @@ impl ControlService for DaemonControlService {
             }
         };
 
-        Ok(Response::new(Box::pin(output) as Self::ConsoleDataStream))
+        Ok(Response::new(
+            Box::pin(output) as Self::AttachZoneConsoleStream
+        ))
     }
 
-    async fn read_guest_metrics(
+    async fn read_zone_metrics(
         &self,
-        request: Request<ReadGuestMetricsRequest>,
-    ) -> Result<Response<ReadGuestMetricsReply>, Status> {
+        request: Request<ReadZoneMetricsRequest>,
+    ) -> Result<Response<ReadZoneMetricsReply>, Status> {
         let request = request.into_inner();
-        let uuid = Uuid::from_str(&request.guest_id).map_err(|error| ApiError {
+        let uuid = Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
             message: error.to_string(),
         })?;
         let client = self.idm.client(uuid).await.map_err(|error| ApiError {
@@ -420,7 +422,7 @@ impl ControlService for DaemonControlService {
             message: error.to_string(),
         })?;
 
-        let mut reply = ReadGuestMetricsReply::default();
+        let mut reply = ReadZoneMetricsReply::default();
         if let Some(IdmResponseType::Metrics(metrics)) = response.response {
             reply.root = metrics.root.map(idm_metric_to_api);
         }

@@ -1,66 +1,66 @@
 use std::{collections::HashMap, path::Path, sync::Arc};
 
 use anyhow::Result;
-use krata::v1::common::Guest;
+use krata::v1::common::Zone;
 use log::error;
 use prost::Message;
 use redb::{Database, ReadableTable, TableDefinition};
 use uuid::Uuid;
 
-const GUESTS: TableDefinition<u128, &[u8]> = TableDefinition::new("guests");
+const ZONES: TableDefinition<u128, &[u8]> = TableDefinition::new("zones");
 
 #[derive(Clone)]
-pub struct GuestStore {
+pub struct ZoneStore {
     database: Arc<Database>,
 }
 
-impl GuestStore {
+impl ZoneStore {
     pub fn open(path: &Path) -> Result<Self> {
         let database = Database::create(path)?;
         let write = database.begin_write()?;
-        let _ = write.open_table(GUESTS);
+        let _ = write.open_table(ZONES);
         write.commit()?;
-        Ok(GuestStore {
+        Ok(ZoneStore {
             database: Arc::new(database),
         })
     }
 
-    pub async fn read(&self, id: Uuid) -> Result<Option<Guest>> {
+    pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
         let read = self.database.begin_read()?;
-        let table = read.open_table(GUESTS)?;
+        let table = read.open_table(ZONES)?;
         let Some(entry) = table.get(id.to_u128_le())? else {
             return Ok(None);
         };
         let bytes = entry.value();
-        Ok(Some(Guest::decode(bytes)?))
+        Ok(Some(Zone::decode(bytes)?))
     }
 
-    pub async fn list(&self) -> Result<HashMap<Uuid, Guest>> {
-        let mut guests: HashMap<Uuid, Guest> = HashMap::new();
+    pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
+        let mut zones: HashMap<Uuid, Zone> = HashMap::new();
         let read = self.database.begin_read()?;
-        let table = read.open_table(GUESTS)?;
+        let table = read.open_table(ZONES)?;
         for result in table.iter()? {
             let (key, value) = result?;
             let uuid = Uuid::from_u128_le(key.value());
-            let state = match Guest::decode(value.value()) {
+            let state = match Zone::decode(value.value()) {
                 Ok(state) => state,
                 Err(error) => {
                     error!(
-                        "found invalid guest state in database for uuid {}: {}",
+                        "found invalid zone state in database for uuid {}: {}",
                         uuid, error
                     );
                     continue;
                 }
             };
-            guests.insert(uuid, state);
+            zones.insert(uuid, state);
        }
-        Ok(guests)
+        Ok(zones)
    }
 
-    pub async fn update(&self, id: Uuid, entry: Guest) -> Result<()> {
+    pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
         let write = self.database.begin_write()?;
         {
-            let mut table = write.open_table(GUESTS)?;
+            let mut table = write.open_table(ZONES)?;
             let bytes = entry.encode_to_vec();
             table.insert(id.to_u128_le(), bytes.as_slice())?;
         }
@@ -71,7 +71,7 @@ impl GuestStore {
     pub async fn remove(&self, id: Uuid) -> Result<()> {
         let write = self.database.begin_write()?;
         {
-            let mut table = write.open_table(GUESTS)?;
+            let mut table = write.open_table(ZONES)?;
             table.remove(id.to_u128_le())?;
         }
         write.commit()?;

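Worth noting alongside the mechanical rename: the store now opens a redb table named
"zones" (previously "guests"), keyed by the zone UUID as a little-endian u128, with
prost-encoded Zone messages as values, so an existing guests.db is not picked up under
the new name. A small usage sketch; the import path for ZoneStore outside the daemon
is assumed, not shown in this diff:

    use anyhow::Result;
    use krata::v1::common::Zone;
    use kratad::db::ZoneStore; // assumed crate/module path for the daemon library
    use std::path::PathBuf;
    use uuid::Uuid;

    #[tokio::main]
    async fn main() -> Result<()> {
        let store = ZoneStore::open(&PathBuf::from("/tmp/zones.db"))?;
        let uuid = Uuid::new_v4();

        // update() inserts or overwrites the prost-encoded record for this UUID.
        let zone = Zone { id: uuid.to_string(), ..Default::default() };
        store.update(uuid, zone).await?;

        // read() decodes one record; list() scans the whole table into a map.
        assert!(store.read(uuid).await?.is_some());
        println!("{} zones stored", store.list().await?.len());

        store.remove(uuid).await?;
        Ok(())
    }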
@@ -31,7 +31,7 @@ impl DaemonDeviceManager {
         let mut devices = self.devices.write().await;
         let Some(state) = devices.get_mut(device) else {
             return Err(anyhow!(
-                "unable to claim unknown device '{}' for guest {}",
+                "unable to claim unknown device '{}' for zone {}",
                 device,
                 uuid
             ));
@@ -39,7 +39,7 @@ impl DaemonDeviceManager {
 
         if let Some(owner) = state.owner {
             return Err(anyhow!(
-                "unable to claim device '{}' for guest {}: already claimed by {}",
+                "unable to claim device '{}' for zone {}: already claimed by {}",
                 device,
                 uuid,
                 owner
@@ -92,7 +92,7 @@ impl DaemonDeviceManager {
 
         for (name, uuid) in &claims {
             if !devices.contains_key(name) {
-                warn!("unknown device '{}' assigned to guest {}", name, uuid);
+                warn!("unknown device '{}' assigned to zone {}", name, uuid);
             }
         }
 

@@ -4,10 +4,12 @@ use std::{
     time::Duration,
 };
 
+use crate::{db::ZoneStore, idm::DaemonIdmHandle};
 use anyhow::Result;
+use krata::v1::common::ZoneExitInfo;
 use krata::{
     idm::{internal::event::Event as EventType, internal::Event},
-    v1::common::{GuestExitInfo, GuestState, GuestStatus},
+    v1::common::{ZoneState, ZoneStatus},
 };
 use log::{error, warn};
 use tokio::{
@@ -21,8 +23,6 @@ use tokio::{
 };
 use uuid::Uuid;
 
-use crate::{db::GuestStore, idm::DaemonIdmHandle};
-
 pub type DaemonEvent = krata::v1::control::watch_events_reply::Event;
 
 const EVENT_CHANNEL_QUEUE_LEN: usize = 1000;
@@ -45,8 +45,8 @@ impl DaemonEventContext {
 }
 
 pub struct DaemonEventGenerator {
-    guests: GuestStore,
-    guest_reconciler_notify: Sender<Uuid>,
+    zones: ZoneStore,
+    zone_reconciler_notify: Sender<Uuid>,
     feed: broadcast::Receiver<DaemonEvent>,
     idm: DaemonIdmHandle,
     idms: HashMap<u32, (Uuid, JoinHandle<()>)>,
@@ -57,15 +57,15 @@ pub struct DaemonEventGenerator {
 
 impl DaemonEventGenerator {
     pub async fn new(
-        guests: GuestStore,
-        guest_reconciler_notify: Sender<Uuid>,
+        zones: ZoneStore,
+        zone_reconciler_notify: Sender<Uuid>,
         idm: DaemonIdmHandle,
     ) -> Result<(DaemonEventContext, DaemonEventGenerator)> {
         let (sender, _) = broadcast::channel(EVENT_CHANNEL_QUEUE_LEN);
         let (idm_sender, idm_receiver) = channel(IDM_EVENT_CHANNEL_QUEUE_LEN);
         let generator = DaemonEventGenerator {
-            guests,
-            guest_reconciler_notify,
+            zones,
+            zone_reconciler_notify,
             feed: sender.subscribe(),
             idm,
             idms: HashMap::new(),
@@ -78,20 +78,20 @@ impl DaemonEventGenerator {
     }
 
     async fn handle_feed_event(&mut self, event: &DaemonEvent) -> Result<()> {
-        let DaemonEvent::GuestChanged(changed) = event;
-        let Some(ref guest) = changed.guest else {
+        let DaemonEvent::ZoneChanged(changed) = event;
+        let Some(ref zone) = changed.zone else {
             return Ok(());
         };
 
-        let Some(ref state) = guest.state else {
+        let Some(ref state) = zone.state else {
             return Ok(());
         };
 
         let status = state.status();
-        let id = Uuid::from_str(&guest.id)?;
+        let id = Uuid::from_str(&zone.id)?;
         let domid = state.domid;
         match status {
-            GuestStatus::Started => {
+            ZoneStatus::Started => {
                 if let Entry::Vacant(e) = self.idms.entry(domid) {
                     let client = self.idm.client_by_domid(domid).await?;
                     let mut receiver = client.subscribe().await?;
@@ -111,7 +111,7 @@ impl DaemonEventGenerator {
                 }
             }
 
-            GuestStatus::Destroyed => {
+            ZoneStatus::Destroyed => {
                 if let Some((_, handle)) = self.idms.remove(&domid) {
                     handle.abort();
                 }
@@ -130,18 +130,18 @@ impl DaemonEventGenerator {
     }
 
     async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
-        if let Some(mut guest) = self.guests.read(id).await? {
-            guest.state = Some(GuestState {
-                status: GuestStatus::Exited.into(),
-                network: guest.state.clone().unwrap_or_default().network,
-                exit_info: Some(GuestExitInfo { code }),
+        if let Some(mut zone) = self.zones.read(id).await? {
+            zone.state = Some(ZoneState {
+                status: ZoneStatus::Exited.into(),
+                network: zone.state.clone().unwrap_or_default().network,
+                exit_info: Some(ZoneExitInfo { code }),
                 error_info: None,
-                host: guest.state.clone().map(|x| x.host).unwrap_or_default(),
-                domid: guest.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
+                host: zone.state.clone().map(|x| x.host).unwrap_or_default(),
+                domid: zone.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
             });
 
-            self.guests.update(id, guest).await?;
-            self.guest_reconciler_notify.send(id).await?;
+            self.zones.update(id, zone).await?;
+            self.zone_reconciler_notify.send(id).await?;
         }
         Ok(())
     }

@@ -24,14 +24,14 @@ use tokio::{
 };
 use uuid::Uuid;
 
-use crate::glt::GuestLookupTable;
+use crate::zlt::ZoneLookupTable;
 
 type BackendFeedMap = Arc<Mutex<HashMap<u32, Sender<IdmTransportPacket>>>>;
 type ClientMap = Arc<Mutex<HashMap<u32, IdmInternalClient>>>;
 
 #[derive(Clone)]
 pub struct DaemonIdmHandle {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     clients: ClientMap,
     feeds: BackendFeedMap,
     tx_sender: Sender<(u32, IdmTransportPacket)>,
@@ -72,7 +72,7 @@ pub struct DaemonIdmSnoopPacket {
 }
 
 pub struct DaemonIdm {
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     clients: ClientMap,
     feeds: BackendFeedMap,
     tx_sender: Sender<(u32, IdmTransportPacket)>,
@@ -84,7 +84,7 @@ pub struct DaemonIdm {
 }
 
 impl DaemonIdm {
-    pub async fn new(glt: GuestLookupTable) -> Result<DaemonIdm> {
+    pub async fn new(glt: ZoneLookupTable) -> Result<DaemonIdm> {
         let (service, tx_raw_sender, rx_receiver) =
             ChannelService::new("krata-channel".to_string(), None).await?;
         let (tx_sender, tx_receiver) = channel(100);

@@ -4,16 +4,15 @@ use anyhow::{anyhow, Result};
 use config::DaemonConfig;
 use console::{DaemonConsole, DaemonConsoleHandle};
 use control::DaemonControlService;
-use db::GuestStore;
+use db::ZoneStore;
 use devices::DaemonDeviceManager;
 use event::{DaemonEventContext, DaemonEventGenerator};
-use glt::GuestLookupTable;
 use idm::{DaemonIdm, DaemonIdmHandle};
 use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
 use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
 use kratart::Runtime;
 use log::info;
-use reconcile::guest::GuestReconciler;
+use reconcile::zone::ZoneReconciler;
 use tokio::{
     fs,
     net::UnixListener,
@@ -23,6 +22,7 @@ use tokio::{
 use tokio_stream::wrappers::UnixListenerStream;
 use tonic::transport::{Identity, Server, ServerTlsConfig};
 use uuid::Uuid;
+use zlt::ZoneLookupTable;
 
 pub mod command;
 pub mod config;
@@ -31,21 +31,21 @@ pub mod control;
 pub mod db;
 pub mod devices;
 pub mod event;
-pub mod glt;
 pub mod idm;
 pub mod metrics;
 pub mod oci;
 pub mod reconcile;
+pub mod zlt;
 
 pub struct Daemon {
     store: String,
     _config: Arc<DaemonConfig>,
-    glt: GuestLookupTable,
+    glt: ZoneLookupTable,
     devices: DaemonDeviceManager,
-    guests: GuestStore,
+    zones: ZoneStore,
     events: DaemonEventContext,
-    guest_reconciler_task: JoinHandle<()>,
-    guest_reconciler_notify: Sender<Uuid>,
+    zone_reconciler_task: JoinHandle<()>,
+    zone_reconciler_notify: Sender<Uuid>,
     generator_task: JoinHandle<()>,
     idm: DaemonIdmHandle,
     console: DaemonConsoleHandle,
@@ -53,7 +53,7 @@ pub struct Daemon {
     runtime: Runtime,
 }
 
-const GUEST_RECONCILER_QUEUE_LEN: usize = 1000;
+const ZONE_RECONCILER_QUEUE_LEN: usize = 1000;
 
 impl Daemon {
     pub async fn new(store: String) -> Result<Self> {
@@ -89,40 +89,40 @@ impl Daemon {
             generated
         };
 
-        let initrd_path = detect_guest_path(&store, "initrd")?;
-        let kernel_path = detect_guest_path(&store, "kernel")?;
-        let addons_path = detect_guest_path(&store, "addons.squashfs")?;
+        let initrd_path = detect_zone_path(&store, "initrd")?;
+        let kernel_path = detect_zone_path(&store, "kernel")?;
+        let addons_path = detect_zone_path(&store, "addons.squashfs")?;
 
         let seed = config.oci.seed.clone().map(PathBuf::from);
         let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
         let runtime = Runtime::new(host_uuid).await?;
-        let glt = GuestLookupTable::new(0, host_uuid);
-        let guests_db_path = format!("{}/guests.db", store);
-        let guests = GuestStore::open(&PathBuf::from(guests_db_path))?;
-        let (guest_reconciler_notify, guest_reconciler_receiver) =
-            channel::<Uuid>(GUEST_RECONCILER_QUEUE_LEN);
+        let glt = ZoneLookupTable::new(0, host_uuid);
+        let zones_db_path = format!("{}/zones.db", store);
+        let zones = ZoneStore::open(&PathBuf::from(zones_db_path))?;
+        let (zone_reconciler_notify, zone_reconciler_receiver) =
+            channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
         let idm = DaemonIdm::new(glt.clone()).await?;
         let idm = idm.launch().await?;
         let console = DaemonConsole::new(glt.clone()).await?;
         let console = console.launch().await?;
         let (events, generator) =
-            DaemonEventGenerator::new(guests.clone(), guest_reconciler_notify.clone(), idm.clone())
+            DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
                 .await?;
         let runtime_for_reconciler = runtime.dupe().await?;
-        let guest_reconciler = GuestReconciler::new(
+        let zone_reconciler = ZoneReconciler::new(
             devices.clone(),
             glt.clone(),
-            guests.clone(),
+            zones.clone(),
             events.clone(),
             runtime_for_reconciler,
             packer.clone(),
-            guest_reconciler_notify.clone(),
+            zone_reconciler_notify.clone(),
             kernel_path,
             initrd_path,
             addons_path,
         )?;
 
-        let guest_reconciler_task = guest_reconciler.launch(guest_reconciler_receiver).await?;
+        let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
         let generator_task = generator.launch().await?;
 
         // TODO: Create a way of abstracting early init tasks in kratad.
@@ -139,10 +139,10 @@ impl Daemon {
             _config: config,
             glt,
             devices,
-            guests,
+            zones,
             events,
-            guest_reconciler_task,
-            guest_reconciler_notify,
+            zone_reconciler_task,
+            zone_reconciler_notify,
             generator_task,
             idm,
             console,
@@ -158,8 +158,8 @@ impl Daemon {
             self.events.clone(),
             self.console.clone(),
             self.idm.clone(),
-            self.guests.clone(),
-            self.guest_reconciler_notify.clone(),
+            self.zones.clone(),
+            self.zone_reconciler_notify.clone(),
             self.packer.clone(),
             self.runtime.clone(),
         );
@@ -214,20 +214,20 @@ impl Daemon {
 
 impl Drop for Daemon {
     fn drop(&mut self) {
-        self.guest_reconciler_task.abort();
+        self.zone_reconciler_task.abort();
         self.generator_task.abort();
     }
 }
 
-fn detect_guest_path(store: &str, name: &str) -> Result<PathBuf> {
-    let mut path = PathBuf::from(format!("{}/guest/{}", store, name));
+fn detect_zone_path(store: &str, name: &str) -> Result<PathBuf> {
+    let mut path = PathBuf::from(format!("{}/zone/{}", store, name));
     if path.is_file() {
         return Ok(path);
     }
 
-    path = PathBuf::from(format!("/usr/share/krata/guest/{}", name));
+    path = PathBuf::from(format!("/usr/share/krata/zone/{}", name));
     if path.is_file() {
         return Ok(path);
     }
-    Err(anyhow!("unable to find required guest file: {}", name))
+    Err(anyhow!("unable to find required zone file: {}", name))
 }

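The asset lookup keeps its two-step probe, just under the new directory name:
detect_zone_path tries {store}/zone/{name} first and falls back to
/usr/share/krata/zone/{name}. A standalone sketch of the same probe order (the
store path below is hypothetical):

    use anyhow::{anyhow, Result};
    use std::path::PathBuf;

    // Same order as detect_zone_path: store-local first, then the system-wide location.
    fn find_zone_asset(store: &str, name: &str) -> Result<PathBuf> {
        for candidate in [
            PathBuf::from(format!("{}/zone/{}", store, name)),
            PathBuf::from(format!("/usr/share/krata/zone/{}", name)),
        ] {
            if candidate.is_file() {
                return Ok(candidate);
            }
        }
        Err(anyhow!("unable to find required zone file: {}", name))
    }

    fn main() -> Result<()> {
        // Hypothetical store path, matching the `store` argument Daemon::new() receives.
        let kernel = find_zone_asset("/var/lib/krata", "kernel")?;
        println!("kernel at {}", kernel.display());
        Ok(())
    }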
@@ -1,20 +1,20 @@
 use krata::{
     idm::internal::{MetricFormat, MetricNode},
-    v1::common::{GuestMetricFormat, GuestMetricNode},
+    v1::common::{ZoneMetricFormat, ZoneMetricNode},
 };
 
-fn idm_metric_format_to_api(format: MetricFormat) -> GuestMetricFormat {
+fn idm_metric_format_to_api(format: MetricFormat) -> ZoneMetricFormat {
     match format {
-        MetricFormat::Unknown => GuestMetricFormat::Unknown,
-        MetricFormat::Bytes => GuestMetricFormat::Bytes,
-        MetricFormat::Integer => GuestMetricFormat::Integer,
-        MetricFormat::DurationSeconds => GuestMetricFormat::DurationSeconds,
+        MetricFormat::Unknown => ZoneMetricFormat::Unknown,
+        MetricFormat::Bytes => ZoneMetricFormat::Bytes,
+        MetricFormat::Integer => ZoneMetricFormat::Integer,
+        MetricFormat::DurationSeconds => ZoneMetricFormat::DurationSeconds,
     }
 }
 
-pub fn idm_metric_to_api(node: MetricNode) -> GuestMetricNode {
+pub fn idm_metric_to_api(node: MetricNode) -> ZoneMetricNode {
     let format = node.format();
-    GuestMetricNode {
+    ZoneMetricNode {
         name: node.name,
         value: node.value,
         format: idm_metric_format_to_api(format).into(),

@@ -1 +1 @@
-pub mod guest;
+pub mod zone;

@@ -7,11 +7,11 @@ use std::{
 
 use anyhow::Result;
 use krata::v1::{
-    common::{Guest, GuestErrorInfo, GuestExitInfo, GuestNetworkState, GuestState, GuestStatus},
-    control::GuestChangedEvent,
+    common::{Zone, ZoneErrorInfo, ZoneExitInfo, ZoneNetworkState, ZoneState, ZoneStatus},
+    control::ZoneChangedEvent,
 };
 use krataoci::packer::service::OciPackerService;
-use kratart::{GuestInfo, Runtime};
+use kratart::{Runtime, ZoneInfo};
 use log::{error, info, trace, warn};
 use tokio::{
     select,
@@ -25,69 +25,69 @@ use tokio::{
 use uuid::Uuid;
 
 use crate::{
-    db::GuestStore,
+    db::ZoneStore,
     devices::DaemonDeviceManager,
     event::{DaemonEvent, DaemonEventContext},
-    glt::GuestLookupTable,
+    zlt::ZoneLookupTable,
 };
 
-use self::start::GuestStarter;
+use self::start::ZoneStarter;
 
 mod start;
 
 const PARALLEL_LIMIT: u32 = 5;
 
 #[derive(Debug)]
-enum GuestReconcilerResult {
+enum ZoneReconcilerResult {
     Unchanged,
     Changed { rerun: bool },
 }
 
-struct GuestReconcilerEntry {
+struct ZoneReconcilerEntry {
     task: JoinHandle<()>,
     sender: Sender<()>,
 }
 
-impl Drop for GuestReconcilerEntry {
+impl Drop for ZoneReconcilerEntry {
     fn drop(&mut self) {
         self.task.abort();
     }
 }
 
 #[derive(Clone)]
-pub struct GuestReconciler {
+pub struct ZoneReconciler {
     devices: DaemonDeviceManager,
-    glt: GuestLookupTable,
-    guests: GuestStore,
+    zlt: ZoneLookupTable,
+    zones: ZoneStore,
     events: DaemonEventContext,
     runtime: Runtime,
     packer: OciPackerService,
     kernel_path: PathBuf,
     initrd_path: PathBuf,
     addons_path: PathBuf,
-    tasks: Arc<Mutex<HashMap<Uuid, GuestReconcilerEntry>>>,
-    guest_reconciler_notify: Sender<Uuid>,
-    reconcile_lock: Arc<RwLock<()>>,
+    tasks: Arc<Mutex<HashMap<Uuid, ZoneReconcilerEntry>>>,
+    zone_reconciler_notify: Sender<Uuid>,
+    zone_reconcile_lock: Arc<RwLock<()>>,
 }
 
-impl GuestReconciler {
+impl ZoneReconciler {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         devices: DaemonDeviceManager,
-        glt: GuestLookupTable,
-        guests: GuestStore,
+        zlt: ZoneLookupTable,
+        zones: ZoneStore,
         events: DaemonEventContext,
         runtime: Runtime,
         packer: OciPackerService,
-        guest_reconciler_notify: Sender<Uuid>,
+        zone_reconciler_notify: Sender<Uuid>,
         kernel_path: PathBuf,
         initrd_path: PathBuf,
         modules_path: PathBuf,
     ) -> Result<Self> {
         Ok(Self {
             devices,
-            glt,
-            guests,
+            zlt,
+            zones,
             events,
             runtime,
             packer,
@@ -95,8 +95,8 @@ impl GuestReconciler {
             initrd_path,
             addons_path: modules_path,
             tasks: Arc::new(Mutex::new(HashMap::new())),
-            guest_reconciler_notify,
-            reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
+            zone_reconciler_notify,
+            zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
         })
     }
 
@@ -115,13 +115,13 @@ impl GuestReconciler {
 
                 Some(uuid) => {
                     if let Err(error) = self.launch_task_if_needed(uuid).await {
-                        error!("failed to start guest reconciler task {}: {}", uuid, error);
+                        error!("failed to start zone reconciler task {}: {}", uuid, error);
                     }
 
                     let map = self.tasks.lock().await;
                     if let Some(entry) = map.get(&uuid) {
                         if let Err(error) = entry.sender.send(()).await {
-                            error!("failed to notify guest reconciler task {}: {}", uuid, error);
+                            error!("failed to notify zone reconciler task {}: {}", uuid, error);
                         }
                     }
                 }
@@ -138,52 +138,52 @@ impl GuestReconciler {
     }
 
     pub async fn reconcile_runtime(&self, initial: bool) -> Result<()> {
-        let _permit = self.reconcile_lock.write().await;
+        let _permit = self.zone_reconcile_lock.write().await;
         trace!("reconciling runtime");
-        let runtime_guests = self.runtime.list().await?;
-        let stored_guests = self.guests.list().await?;
+        let runtime_zones = self.runtime.list().await?;
+        let stored_zones = self.zones.list().await?;
 
-        let non_existent_guests = runtime_guests
+        let non_existent_zones = runtime_zones
             .iter()
-            .filter(|x| !stored_guests.iter().any(|g| *g.0 == x.uuid))
+            .filter(|x| !stored_zones.iter().any(|g| *g.0 == x.uuid))
             .collect::<Vec<_>>();
 
-        for guest in non_existent_guests {
-            warn!("destroying unknown runtime guest {}", guest.uuid);
-            if let Err(error) = self.runtime.destroy(guest.uuid).await {
+        for zone in non_existent_zones {
+            warn!("destroying unknown runtime zone {}", zone.uuid);
+            if let Err(error) = self.runtime.destroy(zone.uuid).await {
                 error!(
-                    "failed to destroy unknown runtime guest {}: {}",
-                    guest.uuid, error
+                    "failed to destroy unknown runtime zone {}: {}",
+                    zone.uuid, error
                 );
             }
-            self.guests.remove(guest.uuid).await?;
+            self.zones.remove(zone.uuid).await?;
         }
 
         let mut device_claims = HashMap::new();
 
-        for (uuid, mut stored_guest) in stored_guests {
-            let previous_guest = stored_guest.clone();
-            let runtime_guest = runtime_guests.iter().find(|x| x.uuid == uuid);
-            match runtime_guest {
+        for (uuid, mut stored_zone) in stored_zones {
+            let previous_zone = stored_zone.clone();
+            let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
+            match runtime_zone {
                 None => {
-                    let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
-                    if state.status() == GuestStatus::Started {
-                        state.status = GuestStatus::Starting.into();
+                    let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
+                    if state.status() == ZoneStatus::Started {
+                        state.status = ZoneStatus::Starting.into();
                     }
-                    stored_guest.state = Some(state);
+                    stored_zone.state = Some(state);
                 }
 
                 Some(runtime) => {
-                    self.glt.associate(uuid, runtime.domid).await;
-                    let mut state = stored_guest.state.as_mut().cloned().unwrap_or_default();
+                    self.zlt.associate(uuid, runtime.domid).await;
+                    let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
                     if let Some(code) = runtime.state.exit_code {
-                        state.status = GuestStatus::Exited.into();
-                        state.exit_info = Some(GuestExitInfo { code });
+                        state.status = ZoneStatus::Exited.into();
+                        state.exit_info = Some(ZoneExitInfo { code });
                     } else {
-                        state.status = GuestStatus::Started.into();
+                        state.status = ZoneStatus::Started.into();
                     }
 
-                    for device in &stored_guest
+                    for device in &stored_zone
                         .spec
                         .as_ref()
                         .cloned()
@@ -193,16 +193,16 @@ impl GuestReconciler {
                         device_claims.insert(device.name.clone(), uuid);
                     }
 
-                    state.network = Some(guestinfo_to_networkstate(runtime));
-                    stored_guest.state = Some(state);
+                    state.network = Some(zoneinfo_to_networkstate(runtime));
+                    stored_zone.state = Some(state);
                 }
             }
 
-            let changed = stored_guest != previous_guest;
+            let changed = stored_zone != previous_zone;
 
             if changed || initial {
-                self.guests.update(uuid, stored_guest).await?;
-                let _ = self.guest_reconciler_notify.try_send(uuid);
+                self.zones.update(uuid, stored_zone).await?;
+                let _ = self.zone_reconciler_notify.try_send(uuid);
             }
         }
 
@@ -212,59 +212,59 @@ impl GuestReconciler {
     }
 
     pub async fn reconcile(&self, uuid: Uuid) -> Result<bool> {
-        let _runtime_reconcile_permit = self.reconcile_lock.read().await;
-        let Some(mut guest) = self.guests.read(uuid).await? else {
+        let _runtime_reconcile_permit = self.zone_reconcile_lock.read().await;
+        let Some(mut zone) = self.zones.read(uuid).await? else {
             warn!(
-                "notified of reconcile for guest {} but it didn't exist",
+                "notified of reconcile for zone {} but it didn't exist",
                 uuid
             );
             return Ok(false);
         };
 
-        info!("reconciling guest {}", uuid);
+        info!("reconciling zone {}", uuid);
 
         self.events
-            .send(DaemonEvent::GuestChanged(GuestChangedEvent {
-                guest: Some(guest.clone()),
+            .send(DaemonEvent::ZoneChanged(ZoneChangedEvent {
+                zone: Some(zone.clone()),
             }))?;
 
-        let start_status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
+        let start_status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
         let result = match start_status {
-            GuestStatus::Starting => self.start(uuid, &mut guest).await,
-            GuestStatus::Exited => self.exited(&mut guest).await,
-            GuestStatus::Destroying => self.destroy(uuid, &mut guest).await,
-            _ => Ok(GuestReconcilerResult::Unchanged),
+            ZoneStatus::Starting => self.start(uuid, &mut zone).await,
+            ZoneStatus::Exited => self.exited(&mut zone).await,
+            ZoneStatus::Destroying => self.destroy(uuid, &mut zone).await,
+            _ => Ok(ZoneReconcilerResult::Unchanged),
         };
 
         let result = match result {
             Ok(result) => result,
             Err(error) => {
-                guest.state = Some(guest.state.as_mut().cloned().unwrap_or_default());
-                guest.state.as_mut().unwrap().status = GuestStatus::Failed.into();
-                guest.state.as_mut().unwrap().error_info = Some(GuestErrorInfo {
+                zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
+                zone.state.as_mut().unwrap().status = ZoneStatus::Failed.into();
+                zone.state.as_mut().unwrap().error_info = Some(ZoneErrorInfo {
                     message: error.to_string(),
                 });
-                warn!("failed to start guest {}: {}", guest.id, error);
-                GuestReconcilerResult::Changed { rerun: false }
+                warn!("failed to start zone {}: {}", zone.id, error);
+                ZoneReconcilerResult::Changed { rerun: false }
             }
         };
 
-        info!("reconciled guest {}", uuid);
+        info!("reconciled zone {}", uuid);
 
-        let status = guest.state.as_ref().map(|x| x.status()).unwrap_or_default();
-        let destroyed = status == GuestStatus::Destroyed;
+        let status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
+        let destroyed = status == ZoneStatus::Destroyed;
 
-        let rerun = if let GuestReconcilerResult::Changed { rerun } = result {
-            let event = DaemonEvent::GuestChanged(GuestChangedEvent {
-                guest: Some(guest.clone()),
+        let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
+            let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
+                zone: Some(zone.clone()),
            });
 
            if destroyed {
-                self.guests.remove(uuid).await?;
+                self.zones.remove(uuid).await?;
                let mut map = self.tasks.lock().await;
                map.remove(&uuid);
            } else {
-                self.guests.update(uuid, guest.clone()).await?;
+                self.zones.update(uuid, zone.clone()).await?;
            }
 
            self.events.send(event)?;
@@ -276,50 +276,50 @@ impl GuestReconciler {
         Ok(rerun)
     }
 
-    async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
-        let starter = GuestStarter {
+    async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
+        let starter = ZoneStarter {
             devices: &self.devices,
             kernel_path: &self.kernel_path,
             initrd_path: &self.initrd_path,
             addons_path: &self.addons_path,
             packer: &self.packer,
-            glt: &self.glt,
+            glt: &self.zlt,
             runtime: &self.runtime,
         };
-        starter.start(uuid, guest).await
+        starter.start(uuid, zone).await
     }
 
-    async fn exited(&self, guest: &mut Guest) -> Result<GuestReconcilerResult> {
-        if let Some(ref mut state) = guest.state {
-            state.set_status(GuestStatus::Destroying);
-            Ok(GuestReconcilerResult::Changed { rerun: true })
+    async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
+        if let Some(ref mut state) = zone.state {
+            state.set_status(ZoneStatus::Destroying);
+            Ok(ZoneReconcilerResult::Changed { rerun: true })
         } else {
-            Ok(GuestReconcilerResult::Unchanged)
+            Ok(ZoneReconcilerResult::Unchanged)
         }
     }
 
-    async fn destroy(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
+    async fn destroy(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
         if let Err(error) = self.runtime.destroy(uuid).await {
-            trace!("failed to destroy runtime guest {}: {}", uuid, error);
+            trace!("failed to destroy runtime zone {}: {}", uuid, error);
         }
 
-        let domid = guest.state.as_ref().map(|x| x.domid);
+        let domid = zone.state.as_ref().map(|x| x.domid);
 
         if let Some(domid) = domid {
-            self.glt.remove(uuid, domid).await;
+            self.zlt.remove(uuid, domid).await;
         }
 
-        info!("destroyed guest {}", uuid);
-        guest.state = Some(GuestState {
-            status: GuestStatus::Destroyed.into(),
+        info!("destroyed zone {}", uuid);
+        zone.state = Some(ZoneState {
+            status: ZoneStatus::Destroyed.into(),
             network: None,
             exit_info: None,
             error_info: None,
-            host: self.glt.host_uuid().to_string(),
+            host: self.zlt.host_uuid().to_string(),
             domid: domid.unwrap_or(u32::MAX),
         });
         self.devices.release_all(uuid).await?;
-        Ok(GuestReconcilerResult::Changed { rerun: false })
+        Ok(ZoneReconcilerResult::Changed { rerun: false })
     }
 
     async fn launch_task_if_needed(&self, uuid: Uuid) -> Result<()> {
@@ -333,7 +333,7 @@ impl GuestReconciler {
         Ok(())
     }
 
-    async fn launch_task(&self, uuid: Uuid) -> Result<GuestReconcilerEntry> {
+    async fn launch_task(&self, uuid: Uuid) -> Result<ZoneReconcilerEntry> {
         let this = self.clone();
         let (sender, mut receiver) = channel(10);
         let task = tokio::task::spawn(async move {
@@ -346,7 +346,7 @@ impl GuestReconciler {
                 let rerun = match this.reconcile(uuid).await {
                     Ok(rerun) => rerun,
                     Err(error) => {
-                        error!("failed to reconcile guest {}: {}", uuid, error);
+                        error!("failed to reconcile zone {}: {}", uuid, error);
                         false
                     }
                 };
@@ -358,15 +358,15 @@ impl GuestReconciler {
                 }
             }
         });
-        Ok(GuestReconcilerEntry { task, sender })
+        Ok(ZoneReconcilerEntry { task, sender })
     }
 }
 
-pub fn guestinfo_to_networkstate(info: &GuestInfo) -> GuestNetworkState {
-    GuestNetworkState {
-        guest_ipv4: info.guest_ipv4.map(|x| x.to_string()).unwrap_or_default(),
-        guest_ipv6: info.guest_ipv6.map(|x| x.to_string()).unwrap_or_default(),
-        guest_mac: info.guest_mac.as_ref().cloned().unwrap_or_default(),
+pub fn zoneinfo_to_networkstate(info: &ZoneInfo) -> ZoneNetworkState {
+    ZoneNetworkState {
+        zone_ipv4: info.zone_ipv4.map(|x| x.to_string()).unwrap_or_default(),
+        zone_ipv6: info.zone_ipv6.map(|x| x.to_string()).unwrap_or_default(),
+        zone_mac: info.zone_mac.as_ref().cloned().unwrap_or_default(),
         gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
         gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
         gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),

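The reconciler's state machine is unchanged by the rename: reconcile() dispatches on
the stored status (Starting runs start(), Exited runs exited(), Destroying runs
destroy()), and exited() only flips the zone to Destroying with rerun: true, so the
destroy step happens on an immediate follow-up pass. A self-contained sketch of that
dispatch loop, using a stand-in enum rather than the protobuf ZoneStatus:

    // Stand-in for the protobuf ZoneStatus; illustration only.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Status {
        Starting,
        Started,
        Exited,
        Destroying,
        Destroyed,
    }

    // Mirrors ZoneReconcilerResult: either nothing changed, or the zone changed
    // and may need another reconcile pass immediately.
    enum ReconcileResult {
        Unchanged,
        Changed { rerun: bool },
    }

    fn step(status: Status) -> (Status, ReconcileResult) {
        match status {
            // start() boots the zone and reports a change.
            Status::Starting => (Status::Started, ReconcileResult::Changed { rerun: false }),
            // exited() only flips the status and asks for an immediate rerun...
            Status::Exited => (Status::Destroying, ReconcileResult::Changed { rerun: true }),
            // ...so destroy() runs on that rerun and finishes the lifecycle.
            Status::Destroying => (Status::Destroyed, ReconcileResult::Changed { rerun: false }),
            other => (other, ReconcileResult::Unchanged),
        }
    }

    fn main() {
        let mut status = Status::Exited;
        loop {
            let (next, result) = step(status);
            status = next;
            match result {
                ReconcileResult::Changed { rerun: true } => continue,
                _ => break,
            }
        }
        assert_eq!(status, Status::Destroyed);
    }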
@@ -6,40 +6,40 @@ use std::sync::atomic::{AtomicBool, Ordering};
 use anyhow::{anyhow, Result};
 use futures::StreamExt;
 use krata::launchcfg::LaunchPackedFormat;
-use krata::v1::common::GuestOciImageSpec;
-use krata::v1::common::{guest_image_spec::Image, Guest, GuestState, GuestStatus, OciImageFormat};
+use krata::v1::common::ZoneOciImageSpec;
+use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
 use krataoci::packer::{service::OciPackerService, OciPackedFormat};
 use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
-use kratart::{launch::GuestLaunchRequest, Runtime};
+use kratart::{launch::ZoneLaunchRequest, Runtime};
 use log::info;
 
+use crate::config::DaemonPciDeviceRdmReservePolicy;
+use crate::devices::DaemonDeviceManager;
+use crate::{
+    reconcile::zone::{zoneinfo_to_networkstate, ZoneReconcilerResult},
+    zlt::ZoneLookupTable,
+};
+use krata::v1::common::zone_image_spec::Image;
 use tokio::fs::{self, File};
 use tokio::io::AsyncReadExt;
 use tokio_tar::Archive;
 use uuid::Uuid;
 
-use crate::config::DaemonPciDeviceRdmReservePolicy;
-use crate::devices::DaemonDeviceManager;
-use crate::{
-    glt::GuestLookupTable,
-    reconcile::guest::{guestinfo_to_networkstate, GuestReconcilerResult},
-};
-
-pub struct GuestStarter<'a> {
+pub struct ZoneStarter<'a> {
     pub devices: &'a DaemonDeviceManager,
     pub kernel_path: &'a Path,
     pub initrd_path: &'a Path,
     pub addons_path: &'a Path,
     pub packer: &'a OciPackerService,
-    pub glt: &'a GuestLookupTable,
+    pub glt: &'a ZoneLookupTable,
     pub runtime: &'a Runtime,
 }
 
-impl GuestStarter<'_> {
+impl ZoneStarter<'_> {
     pub async fn oci_spec_tar_read_file(
         &self,
         file: &Path,
-        oci: &GuestOciImageSpec,
+        oci: &ZoneOciImageSpec,
     ) -> Result<Vec<u8>> {
         if oci.format() != OciImageFormat::Tar {
             return Err(anyhow!(
@@ -75,9 +75,9 @@ impl GuestStarter<'_> {
         ))
     }
 
-    pub async fn start(&self, uuid: Uuid, guest: &mut Guest) -> Result<GuestReconcilerResult> {
-        let Some(ref spec) = guest.spec else {
-            return Err(anyhow!("guest spec not specified"));
+    pub async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
+        let Some(ref spec) = zone.spec else {
+            return Err(anyhow!("zone spec not specified"));
         };
 
         let Some(ref image) = spec.image else {
@@ -100,7 +100,7 @@ impl GuestStarter<'_> {
                 OciImageFormat::Squashfs => OciPackedFormat::Squashfs,
                 OciImageFormat::Erofs => OciPackedFormat::Erofs,
                 OciImageFormat::Tar => {
-                    return Err(anyhow!("tar image format is not supported for guests"));
+                    return Err(anyhow!("tar image format is not supported for zones"));
                 }
             },
         )
@@ -176,7 +176,7 @@ impl GuestStarter<'_> {
 
         let info = self
             .runtime
-            .launch(GuestLaunchRequest {
+            .launch(ZoneLaunchRequest {
                 format: LaunchPackedFormat::Squashfs,
                 uuid: Some(uuid),
                 name: if spec.name.is_empty() {
@@ -201,17 +201,17 @@ impl GuestStarter<'_> {
             })
             .await?;
         self.glt.associate(uuid, info.domid).await;
-        info!("started guest {}", uuid);
-        guest.state = Some(GuestState {
-            status: GuestStatus::Started.into(),
-            network: Some(guestinfo_to_networkstate(&info)),
+        info!("started zone {}", uuid);
+        zone.state = Some(ZoneState {
+            status: ZoneStatus::Started.into(),
+            network: Some(zoneinfo_to_networkstate(&info)),
             exit_info: None,
             error_info: None,
             host: self.glt.host_uuid().to_string(),
             domid: info.domid,
         });
         success.store(true, Ordering::Release);
-        Ok(GuestReconcilerResult::Changed { rerun: false })
+        Ok(ZoneReconcilerResult::Changed { rerun: false })
     }
 }

@@ -3,18 +3,18 @@ use std::{collections::HashMap, sync::Arc};
 use tokio::sync::RwLock;
 use uuid::Uuid;
 
-struct GuestLookupTableState {
+struct ZoneLookupTableState {
     domid_to_uuid: HashMap<u32, Uuid>,
     uuid_to_domid: HashMap<Uuid, u32>,
 }
 
-impl GuestLookupTableState {
+impl ZoneLookupTableState {
     pub fn new(host_uuid: Uuid) -> Self {
         let mut domid_to_uuid = HashMap::new();
         let mut uuid_to_domid = HashMap::new();
         domid_to_uuid.insert(0, host_uuid);
         uuid_to_domid.insert(host_uuid, 0);
-        GuestLookupTableState {
+        ZoneLookupTableState {
             domid_to_uuid,
             uuid_to_domid,
         }
@@ -22,18 +22,18 @@ impl GuestLookupTableState {
 }
 
 #[derive(Clone)]
-pub struct GuestLookupTable {
+pub struct ZoneLookupTable {
     host_domid: u32,
     host_uuid: Uuid,
-    state: Arc<RwLock<GuestLookupTableState>>,
+    state: Arc<RwLock<ZoneLookupTableState>>,
 }
 
-impl GuestLookupTable {
+impl ZoneLookupTable {
     pub fn new(host_domid: u32, host_uuid: Uuid) -> Self {
-        GuestLookupTable {
+        ZoneLookupTable {
             host_domid,
             host_uuid,
-            state: Arc::new(RwLock::new(GuestLookupTableState::new(host_uuid))),
+            state: Arc::new(RwLock::new(ZoneLookupTableState::new(host_uuid))),
         }
     }
 

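The renamed table is a bidirectional domid <-> UUID map, with the host pre-seeded at
domid 0 (lib.rs constructs it as ZoneLookupTable::new(0, host_uuid)). A self-contained
illustration of the invariant the real table maintains behind its RwLock; the method
names mirror associate()/remove() above, but this is a sketch, not the daemon's code:

    use std::collections::HashMap;
    use uuid::Uuid;

    // Mirrors ZoneLookupTableState: two maps kept in sync.
    struct LookupState {
        domid_to_uuid: HashMap<u32, Uuid>,
        uuid_to_domid: HashMap<Uuid, u32>,
    }

    impl LookupState {
        fn new(host_uuid: Uuid) -> Self {
            let mut state = LookupState {
                domid_to_uuid: HashMap::new(),
                uuid_to_domid: HashMap::new(),
            };
            // The host itself is always present as Xen domain 0.
            state.associate(host_uuid, 0);
            state
        }

        fn associate(&mut self, uuid: Uuid, domid: u32) {
            self.domid_to_uuid.insert(domid, uuid);
            self.uuid_to_domid.insert(uuid, domid);
        }

        fn remove(&mut self, uuid: Uuid, domid: u32) {
            self.domid_to_uuid.remove(&domid);
            self.uuid_to_domid.remove(&uuid);
        }
    }

    fn main() {
        let host = Uuid::new_v4();
        let mut state = LookupState::new(host);
        let zone = Uuid::new_v4();
        state.associate(zone, 5);
        assert_eq!(state.domid_to_uuid.get(&5), Some(&zone));
        state.remove(zone, 5);
        assert!(state.uuid_to_domid.get(&zone).is_none());
    }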