Mirror of https://github.com/edera-dev/krata.git (synced 2025-08-04 05:31:32 +00:00)

feature(krata): prepare for workload rework (#276)

* chore(code): simple code cleanup
* chore(code): additional code cleanup
* feature(krata): rework api and make ip assignment persistent to database
* rework and cleanup
* fix daemon config references
@@ -9,6 +9,7 @@ edition = "2021"
resolver = "2"

[dependencies]
krata-advmac = { workspace = true }
anyhow = { workspace = true }
async-stream = { workspace = true }
async-trait = { workspace = true }
@@ -17,6 +18,7 @@ circular-buffer = { workspace = true }
clap = { workspace = true }
env_logger = { workspace = true }
futures = { workspace = true }
ipnetwork = { workspace = true }
krata = { path = "../krata", version = "^0.0.15" }
krata-oci = { path = "../oci", version = "^0.0.15" }
krata-runtime = { path = "../runtime", version = "^0.0.15" }
@@ -25,6 +27,7 @@ prost = { workspace = true }
redb = { workspace = true }
scopeguard = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
signal-hook = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
@@ -10,6 +10,8 @@ pub struct DaemonConfig {
    pub oci: OciConfig,
    #[serde(default)]
    pub pci: DaemonPciConfig,
    #[serde(default = "default_network")]
    pub network: DaemonNetworkConfig,
}

#[derive(Serialize, Deserialize, Clone, Debug, Default)]
@@ -49,6 +51,65 @@ pub enum DaemonPciDeviceRdmReservePolicy {
    Relaxed,
}

#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonNetworkConfig {
    #[serde(default = "default_network_nameservers")]
    pub nameservers: Vec<String>,
    #[serde(default = "default_network_ipv4")]
    pub ipv4: DaemonIpv4NetworkConfig,
    #[serde(default = "default_network_ipv6")]
    pub ipv6: DaemonIpv6NetworkConfig,
}

#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonIpv4NetworkConfig {
    #[serde(default = "default_network_ipv4_subnet")]
    pub subnet: String,
}

#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct DaemonIpv6NetworkConfig {
    #[serde(default = "default_network_ipv6_subnet")]
    pub subnet: String,
}

fn default_network() -> DaemonNetworkConfig {
    DaemonNetworkConfig {
        nameservers: default_network_nameservers(),
        ipv4: default_network_ipv4(),
        ipv6: default_network_ipv6(),
    }
}

fn default_network_nameservers() -> Vec<String> {
    vec![
        "1.1.1.1".to_string(),
        "1.0.0.1".to_string(),
        "2606:4700:4700::1111".to_string(),
        "2606:4700:4700::1001".to_string(),
    ]
}

fn default_network_ipv4() -> DaemonIpv4NetworkConfig {
    DaemonIpv4NetworkConfig {
        subnet: default_network_ipv4_subnet(),
    }
}

fn default_network_ipv4_subnet() -> String {
    "10.75.80.0/24".to_string()
}

fn default_network_ipv6() -> DaemonIpv6NetworkConfig {
    DaemonIpv6NetworkConfig {
        subnet: default_network_ipv6_subnet(),
    }
}

fn default_network_ipv6_subnet() -> String {
    "fdd4:1476:6c7e::/48".to_string()
}

impl DaemonConfig {
    pub async fn load(path: &Path) -> Result<DaemonConfig> {
        if path.exists() {
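Aside for context (not part of this commit): the defaults above give each host a private IPv4 /24 and an IPv6 ULA /48. The sketch below shows how those defaults behave with the ipnetwork crate added to Cargo.toml earlier in this diff; it assumes only the documented Ipv4Network/Ipv6Network API, and the main function is purely illustrative.

use ipnetwork::{Ipv4Network, Ipv6Network};
use std::str::FromStr;

fn main() -> anyhow::Result<()> {
    // The default subnets from config.rs above.
    let ipv4 = Ipv4Network::from_str("10.75.80.0/24")?;
    let ipv6 = Ipv6Network::from_str("fdd4:1476:6c7e::/48")?;

    // The allocator in ip/assignment.rs (later in this diff) only hands out
    // IPv4 addresses whose last octet falls in 1..=249, so the default /24
    // leaves at most 249 assignable slots (one of which the host reserves).
    let assignable = ipv4
        .iter()
        .filter(|ip| {
            let last = ip.octets()[3];
            last > 0 && last < 250
        })
        .count();

    println!("ipv4 /{} assignable={}, ipv6 /{}", ipv4.prefix(), assignable, ipv6.prefix());
    Ok(())
}

The last-octet filter here mirrors the allocator in ip/assignment.rs further down, which skips .0 and leaves .250 and above alone because those values can carry special meaning.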
@ -1,5 +1,15 @@
|
||||
use crate::db::zone::ZoneStore;
|
||||
use crate::{
|
||||
command::DaemonCommand, console::DaemonConsoleHandle, devices::DaemonDeviceManager,
|
||||
event::DaemonEventContext, idm::DaemonIdmHandle, metrics::idm_metric_to_api,
|
||||
oci::convert_oci_progress, zlt::ZoneLookupTable,
|
||||
};
|
||||
use async_stream::try_stream;
|
||||
use futures::Stream;
|
||||
use krata::v1::control::{
|
||||
GetZoneReply, GetZoneRequest, SetHostPowerManagementPolicyReply,
|
||||
SetHostPowerManagementPolicyRequest,
|
||||
};
|
||||
use krata::{
|
||||
idm::internal::{
|
||||
exec_stream_request_update::Update, request::Request as IdmRequestType,
|
||||
@ -10,11 +20,11 @@ use krata::{
|
||||
common::{OciImageFormat, Zone, ZoneState, ZoneStatus},
|
||||
control::{
|
||||
control_service_server::ControlService, CreateZoneReply, CreateZoneRequest,
|
||||
DestroyZoneReply, DestroyZoneRequest, DeviceInfo, ExecZoneReply, ExecZoneRequest,
|
||||
HostCpuTopologyInfo, HostCpuTopologyReply, HostCpuTopologyRequest,
|
||||
HostPowerManagementPolicy, IdentifyHostReply, IdentifyHostRequest, ListDevicesReply,
|
||||
DestroyZoneReply, DestroyZoneRequest, DeviceInfo, ExecInsideZoneReply,
|
||||
ExecInsideZoneRequest, GetHostCpuTopologyReply, GetHostCpuTopologyRequest,
|
||||
HostCpuTopologyInfo, HostStatusReply, HostStatusRequest, ListDevicesReply,
|
||||
ListDevicesRequest, ListZonesReply, ListZonesRequest, PullImageReply, PullImageRequest,
|
||||
ReadZoneMetricsReply, ReadZoneMetricsRequest, ResolveZoneReply, ResolveZoneRequest,
|
||||
ReadZoneMetricsReply, ReadZoneMetricsRequest, ResolveZoneIdReply, ResolveZoneIdRequest,
|
||||
SnoopIdmReply, SnoopIdmRequest, WatchEventsReply, WatchEventsRequest, ZoneConsoleReply,
|
||||
ZoneConsoleRequest,
|
||||
},
|
||||
@ -36,12 +46,6 @@ use tokio_stream::StreamExt;
|
||||
use tonic::{Request, Response, Status, Streaming};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
command::DaemonCommand, console::DaemonConsoleHandle, db::ZoneStore,
|
||||
devices::DaemonDeviceManager, event::DaemonEventContext, idm::DaemonIdmHandle,
|
||||
metrics::idm_metric_to_api, oci::convert_oci_progress, zlt::ZoneLookupTable,
|
||||
};
|
||||
|
||||
pub struct ApiError {
|
||||
message: String,
|
||||
}
|
||||
@ -112,8 +116,8 @@ enum PullImageSelect {
|
||||
|
||||
#[tonic::async_trait]
|
||||
impl ControlService for DaemonControlService {
|
||||
type ExecZoneStream =
|
||||
Pin<Box<dyn Stream<Item = Result<ExecZoneReply, Status>> + Send + 'static>>;
|
||||
type ExecInsideZoneStream =
|
||||
Pin<Box<dyn Stream<Item = Result<ExecInsideZoneReply, Status>> + Send + 'static>>;
|
||||
|
||||
type AttachZoneConsoleStream =
|
||||
Pin<Box<dyn Stream<Item = Result<ZoneConsoleReply, Status>> + Send + 'static>>;
|
||||
@ -127,12 +131,12 @@ impl ControlService for DaemonControlService {
|
||||
type SnoopIdmStream =
|
||||
Pin<Box<dyn Stream<Item = Result<SnoopIdmReply, Status>> + Send + 'static>>;
|
||||
|
||||
async fn identify_host(
|
||||
async fn host_status(
|
||||
&self,
|
||||
request: Request<IdentifyHostRequest>,
|
||||
) -> Result<Response<IdentifyHostReply>, Status> {
|
||||
request: Request<HostStatusRequest>,
|
||||
) -> Result<Response<HostStatusReply>, Status> {
|
||||
let _ = request.into_inner();
|
||||
Ok(Response::new(IdentifyHostReply {
|
||||
Ok(Response::new(HostStatusReply {
|
||||
host_domid: self.glt.host_domid(),
|
||||
host_uuid: self.glt.host_uuid().to_string(),
|
||||
krata_version: DaemonCommand::version(),
|
||||
@ -156,11 +160,11 @@ impl ControlService for DaemonControlService {
|
||||
uuid,
|
||||
Zone {
|
||||
id: uuid.to_string(),
|
||||
state: Some(ZoneState {
|
||||
status: ZoneStatus::Starting.into(),
|
||||
network: None,
|
||||
exit_info: None,
|
||||
error_info: None,
|
||||
status: Some(ZoneStatus {
|
||||
state: ZoneState::Creating.into(),
|
||||
network_status: None,
|
||||
exit_status: None,
|
||||
error_status: None,
|
||||
host: self.glt.host_uuid().to_string(),
|
||||
domid: u32::MAX,
|
||||
}),
|
||||
@ -180,10 +184,10 @@ impl ControlService for DaemonControlService {
|
||||
}))
|
||||
}
|
||||
|
||||
async fn exec_zone(
|
||||
async fn exec_inside_zone(
|
||||
&self,
|
||||
request: Request<Streaming<ExecZoneRequest>>,
|
||||
) -> Result<Response<Self::ExecZoneStream>, Status> {
|
||||
request: Request<Streaming<ExecInsideZoneRequest>>,
|
||||
) -> Result<Response<Self::ExecInsideZoneStream>, Status> {
|
||||
let mut input = request.into_inner();
|
||||
let Some(request) = input.next().await else {
|
||||
return Err(ApiError {
|
||||
@ -232,7 +236,7 @@ impl ControlService for DaemonControlService {
|
||||
loop {
|
||||
select! {
|
||||
x = input.next() => if let Some(update) = x {
|
||||
let update: Result<ExecZoneRequest, Status> = update.map_err(|error| ApiError {
|
||||
let update: Result<ExecInsideZoneRequest, Status> = update.map_err(|error| ApiError {
|
||||
message: error.to_string()
|
||||
}.into());
|
||||
|
||||
@ -252,7 +256,7 @@ impl ControlService for DaemonControlService {
|
||||
let Some(IdmResponseType::ExecStream(update)) = response.response else {
|
||||
break;
|
||||
};
|
||||
let reply = ExecZoneReply {
|
||||
let reply = ExecInsideZoneReply {
|
||||
exited: update.exited,
|
||||
error: update.error,
|
||||
exit_code: update.exit_code,
|
||||
@ -265,11 +269,11 @@ impl ControlService for DaemonControlService {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Response::new(Box::pin(output) as Self::ExecZoneStream))
|
||||
Ok(Response::new(Box::pin(output) as Self::ExecInsideZoneStream))
|
||||
}
|
||||
|
||||
async fn destroy_zone(
|
||||
@ -287,16 +291,16 @@ impl ControlService for DaemonControlService {
|
||||
.into());
|
||||
};
|
||||
|
||||
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
|
||||
zone.status = Some(zone.status.as_mut().cloned().unwrap_or_default());
|
||||
|
||||
if zone.state.as_ref().unwrap().status() == ZoneStatus::Destroyed {
|
||||
if zone.status.as_ref().unwrap().state() == ZoneState::Destroyed {
|
||||
return Err(ApiError {
|
||||
message: "zone already destroyed".to_string(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
zone.state.as_mut().unwrap().status = ZoneStatus::Destroying.into();
|
||||
zone.status.as_mut().unwrap().state = ZoneState::Destroying.into();
|
||||
self.zones
|
||||
.update(uuid, zone)
|
||||
.await
|
||||
@ -320,10 +324,10 @@ impl ControlService for DaemonControlService {
|
||||
Ok(Response::new(ListZonesReply { zones }))
|
||||
}
|
||||
|
||||
async fn resolve_zone(
|
||||
async fn resolve_zone_id(
|
||||
&self,
|
||||
request: Request<ResolveZoneRequest>,
|
||||
) -> Result<Response<ResolveZoneReply>, Status> {
|
||||
request: Request<ResolveZoneIdRequest>,
|
||||
) -> Result<Response<ResolveZoneIdReply>, Status> {
|
||||
let request = request.into_inner();
|
||||
let zones = self.zones.list().await.map_err(ApiError::from)?;
|
||||
let zones = zones
|
||||
@ -334,8 +338,8 @@ impl ControlService for DaemonControlService {
|
||||
|| x.id == request.name
|
||||
})
|
||||
.collect::<Vec<Zone>>();
|
||||
Ok(Response::new(ResolveZoneReply {
|
||||
zone: zones.first().cloned(),
|
||||
Ok(Response::new(ResolveZoneIdReply {
|
||||
zone_id: zones.first().cloned().map(|x| x.id).unwrap_or_default(),
|
||||
}))
|
||||
}
|
||||
|
||||
@ -558,8 +562,8 @@ impl ControlService for DaemonControlService {
|
||||
|
||||
async fn get_host_cpu_topology(
|
||||
&self,
|
||||
request: Request<HostCpuTopologyRequest>,
|
||||
) -> Result<Response<HostCpuTopologyReply>, Status> {
|
||||
request: Request<GetHostCpuTopologyRequest>,
|
||||
) -> Result<Response<GetHostCpuTopologyReply>, Status> {
|
||||
let _ = request.into_inner();
|
||||
let power = self
|
||||
.runtime
|
||||
@ -579,13 +583,13 @@ impl ControlService for DaemonControlService {
|
||||
})
|
||||
}
|
||||
|
||||
Ok(Response::new(HostCpuTopologyReply { cpus }))
|
||||
Ok(Response::new(GetHostCpuTopologyReply { cpus }))
|
||||
}
|
||||
|
||||
async fn set_host_power_management_policy(
|
||||
&self,
|
||||
request: Request<HostPowerManagementPolicy>,
|
||||
) -> Result<Response<HostPowerManagementPolicy>, Status> {
|
||||
request: Request<SetHostPowerManagementPolicyRequest>,
|
||||
) -> Result<Response<SetHostPowerManagementPolicyReply>, Status> {
|
||||
let policy = request.into_inner();
|
||||
let power = self
|
||||
.runtime
|
||||
@ -603,9 +607,20 @@ impl ControlService for DaemonControlService {
|
||||
.await
|
||||
.map_err(ApiError::from)?;
|
||||
|
||||
Ok(Response::new(HostPowerManagementPolicy {
|
||||
scheduler: scheduler.to_string(),
|
||||
smt_awareness: policy.smt_awareness,
|
||||
Ok(Response::new(SetHostPowerManagementPolicyReply {}))
|
||||
}
|
||||
|
||||
async fn get_zone(
|
||||
&self,
|
||||
request: Request<GetZoneRequest>,
|
||||
) -> Result<Response<GetZoneReply>, Status> {
|
||||
let request = request.into_inner();
|
||||
let zones = self.zones.list().await.map_err(ApiError::from)?;
|
||||
let zone = zones.get(&Uuid::from_str(&request.zone_id).map_err(|error| ApiError {
|
||||
message: error.to_string(),
|
||||
})?);
|
||||
Ok(Response::new(GetZoneReply {
|
||||
zone: zone.cloned(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
crates/daemon/src/db/ip.rs (new file, 118 lines)
@ -0,0 +1,118 @@
|
||||
use crate::db::KrataDatabase;
|
||||
use advmac::MacAddr6;
|
||||
use anyhow::Result;
|
||||
use log::error;
|
||||
use redb::{ReadableTable, TableDefinition};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
use uuid::Uuid;
|
||||
|
||||
const IP_RESERVATION_TABLE: TableDefinition<u128, &[u8]> = TableDefinition::new("ip-reservation");
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct IpReservationStore {
|
||||
db: KrataDatabase,
|
||||
}
|
||||
|
||||
impl IpReservationStore {
|
||||
pub fn open(db: KrataDatabase) -> Result<Self> {
|
||||
let write = db.database.begin_write()?;
|
||||
let _ = write.open_table(IP_RESERVATION_TABLE);
|
||||
write.commit()?;
|
||||
Ok(IpReservationStore { db })
|
||||
}
|
||||
|
||||
pub async fn read(&self, id: Uuid) -> Result<Option<IpReservation>> {
|
||||
let read = self.db.database.begin_read()?;
|
||||
let table = read.open_table(IP_RESERVATION_TABLE)?;
|
||||
let Some(entry) = table.get(id.to_u128_le())? else {
|
||||
return Ok(None);
|
||||
};
|
||||
let bytes = entry.value();
|
||||
Ok(Some(serde_json::from_slice(bytes)?))
|
||||
}
|
||||
|
||||
pub async fn list(&self) -> Result<HashMap<Uuid, IpReservation>> {
|
||||
enum ListEntry {
|
||||
Valid(Uuid, IpReservation),
|
||||
Invalid(Uuid),
|
||||
}
|
||||
let mut reservations: HashMap<Uuid, IpReservation> = HashMap::new();
|
||||
|
||||
let corruptions = {
|
||||
let read = self.db.database.begin_read()?;
|
||||
let table = read.open_table(IP_RESERVATION_TABLE)?;
|
||||
table
|
||||
.iter()?
|
||||
.flat_map(|result| {
|
||||
result.map(|(key, value)| {
|
||||
let uuid = Uuid::from_u128_le(key.value());
|
||||
match serde_json::from_slice::<IpReservation>(value.value()) {
|
||||
Ok(reservation) => ListEntry::Valid(uuid, reservation),
|
||||
Err(error) => {
|
||||
error!(
|
||||
"found invalid ip reservation in database for uuid {}: {}",
|
||||
uuid, error
|
||||
);
|
||||
ListEntry::Invalid(uuid)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
.filter_map(|entry| match entry {
|
||||
ListEntry::Valid(uuid, reservation) => {
|
||||
reservations.insert(uuid, reservation);
|
||||
None
|
||||
}
|
||||
|
||||
ListEntry::Invalid(uuid) => Some(uuid),
|
||||
})
|
||||
.collect::<Vec<Uuid>>()
|
||||
};
|
||||
|
||||
if !corruptions.is_empty() {
|
||||
let write = self.db.database.begin_write()?;
|
||||
let mut table = write.open_table(IP_RESERVATION_TABLE)?;
|
||||
for corruption in corruptions {
|
||||
table.remove(corruption.to_u128_le())?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(reservations)
|
||||
}
|
||||
|
||||
pub async fn update(&self, id: Uuid, entry: IpReservation) -> Result<()> {
|
||||
let write = self.db.database.begin_write()?;
|
||||
{
|
||||
let mut table = write.open_table(IP_RESERVATION_TABLE)?;
|
||||
let bytes = serde_json::to_vec(&entry)?;
|
||||
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
||||
}
|
||||
write.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
||||
let write = self.db.database.begin_write()?;
|
||||
{
|
||||
let mut table = write.open_table(IP_RESERVATION_TABLE)?;
|
||||
table.remove(id.to_u128_le())?;
|
||||
}
|
||||
write.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct IpReservation {
|
||||
pub uuid: String,
|
||||
pub ipv4: Ipv4Addr,
|
||||
pub ipv6: Ipv6Addr,
|
||||
pub mac: MacAddr6,
|
||||
pub ipv4_prefix: u8,
|
||||
pub ipv6_prefix: u8,
|
||||
pub gateway_ipv4: Ipv4Addr,
|
||||
pub gateway_ipv6: Ipv6Addr,
|
||||
pub gateway_mac: MacAddr6,
|
||||
}
|
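Aside (a sketch, not code from this commit): both new stores key their redb tables by the UUID encoded as a little-endian u128, so a key written by update() can be turned back into the original UUID when list() or read() walks the table. Only the uuid crate API is assumed here.

use uuid::Uuid;

fn main() {
    // IpReservationStore and ZoneStore store each record under this u128 key.
    let id = Uuid::from_u128(0x0011_2233_4455_6677_8899_aabb_ccdd_eeff);
    let key: u128 = id.to_u128_le();

    // list()/read() rebuild the UUID from the key the same way.
    assert_eq!(Uuid::from_u128_le(key), id);
    println!("{} -> {:#x}", id, key);
}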
crates/daemon/src/db/mod.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
use anyhow::Result;
use redb::Database;
use std::path::Path;
use std::sync::Arc;

pub mod ip;
pub mod zone;

#[derive(Clone)]
pub struct KrataDatabase {
    pub database: Arc<Database>,
}

impl KrataDatabase {
    pub fn open(path: &Path) -> Result<Self> {
        let database = Database::create(path)?;
        Ok(KrataDatabase {
            database: Arc::new(database),
        })
    }
}
@ -1,33 +1,31 @@
|
||||
use std::{collections::HashMap, path::Path, sync::Arc};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::db::KrataDatabase;
|
||||
use anyhow::Result;
|
||||
use krata::v1::common::Zone;
|
||||
use log::error;
|
||||
use prost::Message;
|
||||
use redb::{Database, ReadableTable, TableDefinition};
|
||||
use redb::{ReadableTable, TableDefinition};
|
||||
use uuid::Uuid;
|
||||
|
||||
const ZONES: TableDefinition<u128, &[u8]> = TableDefinition::new("zones");
|
||||
const ZONE_TABLE: TableDefinition<u128, &[u8]> = TableDefinition::new("zone");
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ZoneStore {
|
||||
database: Arc<Database>,
|
||||
db: KrataDatabase,
|
||||
}
|
||||
|
||||
impl ZoneStore {
|
||||
pub fn open(path: &Path) -> Result<Self> {
|
||||
let database = Database::create(path)?;
|
||||
let write = database.begin_write()?;
|
||||
let _ = write.open_table(ZONES);
|
||||
pub fn open(db: KrataDatabase) -> Result<Self> {
|
||||
let write = db.database.begin_write()?;
|
||||
let _ = write.open_table(ZONE_TABLE);
|
||||
write.commit()?;
|
||||
Ok(ZoneStore {
|
||||
database: Arc::new(database),
|
||||
})
|
||||
Ok(ZoneStore { db })
|
||||
}
|
||||
|
||||
pub async fn read(&self, id: Uuid) -> Result<Option<Zone>> {
|
||||
let read = self.database.begin_read()?;
|
||||
let table = read.open_table(ZONES)?;
|
||||
let read = self.db.database.begin_read()?;
|
||||
let table = read.open_table(ZONE_TABLE)?;
|
||||
let Some(entry) = table.get(id.to_u128_le())? else {
|
||||
return Ok(None);
|
||||
};
|
||||
@ -37,8 +35,8 @@ impl ZoneStore {
|
||||
|
||||
pub async fn list(&self) -> Result<HashMap<Uuid, Zone>> {
|
||||
let mut zones: HashMap<Uuid, Zone> = HashMap::new();
|
||||
let read = self.database.begin_read()?;
|
||||
let table = read.open_table(ZONES)?;
|
||||
let read = self.db.database.begin_read()?;
|
||||
let table = read.open_table(ZONE_TABLE)?;
|
||||
for result in table.iter()? {
|
||||
let (key, value) = result?;
|
||||
let uuid = Uuid::from_u128_le(key.value());
|
||||
@ -58,9 +56,9 @@ impl ZoneStore {
|
||||
}
|
||||
|
||||
pub async fn update(&self, id: Uuid, entry: Zone) -> Result<()> {
|
||||
let write = self.database.begin_write()?;
|
||||
let write = self.db.database.begin_write()?;
|
||||
{
|
||||
let mut table = write.open_table(ZONES)?;
|
||||
let mut table = write.open_table(ZONE_TABLE)?;
|
||||
let bytes = entry.encode_to_vec();
|
||||
table.insert(id.to_u128_le(), bytes.as_slice())?;
|
||||
}
|
||||
@ -69,9 +67,9 @@ impl ZoneStore {
|
||||
}
|
||||
|
||||
pub async fn remove(&self, id: Uuid) -> Result<()> {
|
||||
let write = self.database.begin_write()?;
|
||||
let write = self.db.database.begin_write()?;
|
||||
{
|
||||
let mut table = write.open_table(ZONES)?;
|
||||
let mut table = write.open_table(ZONE_TABLE)?;
|
||||
table.remove(id.to_u128_le())?;
|
||||
}
|
||||
write.commit()?;
|
@ -4,9 +4,10 @@ use std::{
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use crate::{db::ZoneStore, idm::DaemonIdmHandle};
|
||||
use crate::db::zone::ZoneStore;
|
||||
use crate::idm::DaemonIdmHandle;
|
||||
use anyhow::Result;
|
||||
use krata::v1::common::ZoneExitInfo;
|
||||
use krata::v1::common::ZoneExitStatus;
|
||||
use krata::{
|
||||
idm::{internal::event::Event as EventType, internal::Event},
|
||||
v1::common::{ZoneState, ZoneStatus},
|
||||
@ -83,15 +84,15 @@ impl DaemonEventGenerator {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let Some(ref state) = zone.state else {
|
||||
let Some(ref status) = zone.status else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let status = state.status();
|
||||
let state = status.state();
|
||||
let id = Uuid::from_str(&zone.id)?;
|
||||
let domid = state.domid;
|
||||
match status {
|
||||
ZoneStatus::Started => {
|
||||
let domid = status.domid;
|
||||
match state {
|
||||
ZoneState::Created => {
|
||||
if let Entry::Vacant(e) = self.idms.entry(domid) {
|
||||
let client = self.idm.client_by_domid(domid).await?;
|
||||
let mut receiver = client.subscribe().await?;
|
||||
@ -111,7 +112,7 @@ impl DaemonEventGenerator {
|
||||
}
|
||||
}
|
||||
|
||||
ZoneStatus::Destroyed => {
|
||||
ZoneState::Destroyed => {
|
||||
if let Some((_, handle)) = self.idms.remove(&domid) {
|
||||
handle.abort();
|
||||
}
|
||||
@ -131,13 +132,13 @@ impl DaemonEventGenerator {
|
||||
|
||||
async fn handle_exit_code(&mut self, id: Uuid, code: i32) -> Result<()> {
|
||||
if let Some(mut zone) = self.zones.read(id).await? {
|
||||
zone.state = Some(ZoneState {
|
||||
status: ZoneStatus::Exited.into(),
|
||||
network: zone.state.clone().unwrap_or_default().network,
|
||||
exit_info: Some(ZoneExitInfo { code }),
|
||||
error_info: None,
|
||||
host: zone.state.clone().map(|x| x.host).unwrap_or_default(),
|
||||
domid: zone.state.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
||||
zone.status = Some(ZoneStatus {
|
||||
state: ZoneState::Exited.into(),
|
||||
network_status: zone.status.clone().unwrap_or_default().network_status,
|
||||
exit_status: Some(ZoneExitStatus { code }),
|
||||
error_status: None,
|
||||
host: zone.status.clone().map(|x| x.host).unwrap_or_default(),
|
||||
domid: zone.status.clone().map(|x| x.domid).unwrap_or(u32::MAX),
|
||||
});
|
||||
|
||||
self.zones.update(id, zone).await?;
|
||||
|
@ -85,13 +85,13 @@ pub struct DaemonIdm {
|
||||
|
||||
impl DaemonIdm {
|
||||
pub async fn new(glt: ZoneLookupTable) -> Result<DaemonIdm> {
|
||||
debug!("allocating channel for IDM");
|
||||
debug!("allocating channel service for idm");
|
||||
let (service, tx_raw_sender, rx_receiver) =
|
||||
ChannelService::new("krata-channel".to_string(), None).await?;
|
||||
let (tx_sender, tx_receiver) = channel(100);
|
||||
let (snoop_sender, _) = broadcast::channel(100);
|
||||
|
||||
debug!("starting channel service");
|
||||
debug!("starting idm channel service");
|
||||
let task = service.launch().await?;
|
||||
|
||||
let clients = Arc::new(Mutex::new(HashMap::new()));
|
||||
@ -133,52 +133,99 @@ impl DaemonIdm {
|
||||
})
|
||||
}
|
||||
|
||||
async fn process_rx_packet(
|
||||
&mut self,
|
||||
domid: u32,
|
||||
data: Option<Vec<u8>>,
|
||||
buffers: &mut HashMap<u32, BytesMut>,
|
||||
) -> Result<()> {
|
||||
// check if data is present, if it is not, that signals a closed channel.
|
||||
if let Some(data) = data {
|
||||
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
|
||||
buffer.extend_from_slice(&data);
|
||||
loop {
|
||||
// check if the buffer is less than the header size, if so, wait for more data
|
||||
if buffer.len() < 6 {
|
||||
break;
|
||||
}
|
||||
|
||||
// check for the magic bytes 0xff, 0xff at the start of the message, if that doesn't
|
||||
// exist, clear the buffer. this ensures that partial messages won't be processed.
|
||||
if buffer[0] != 0xff || buffer[1] != 0xff {
|
||||
buffer.clear();
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// read the size from the buffer as a little endian u32
|
||||
let size = (buffer[2] as u32
|
||||
| (buffer[3] as u32) << 8
|
||||
| (buffer[4] as u32) << 16
|
||||
| (buffer[5] as u32) << 24) as usize;
|
||||
let needed = size + 6;
|
||||
if buffer.len() < needed {
|
||||
return Ok(());
|
||||
}
|
||||
let mut packet = buffer.split_to(needed);
|
||||
// advance the buffer by the header, leaving only the raw data.
|
||||
packet.advance(6);
|
||||
match IdmTransportPacket::decode(packet) {
|
||||
Ok(packet) => {
|
||||
let _ =
|
||||
client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds)
|
||||
.await?;
|
||||
let guard = self.feeds.lock().await;
|
||||
if let Some(feed) = guard.get(&domid) {
|
||||
let _ = feed.try_send(packet.clone());
|
||||
}
|
||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
|
||||
from: domid,
|
||||
to: 0,
|
||||
packet,
|
||||
});
|
||||
}
|
||||
|
||||
Err(packet) => {
|
||||
warn!("received invalid packet from domain {}: {}", domid, packet);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut clients = self.clients.lock().await;
|
||||
let mut feeds = self.feeds.lock().await;
|
||||
clients.remove(&domid);
|
||||
feeds.remove(&domid);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn tx_packet(&mut self, domid: u32, packet: IdmTransportPacket) -> Result<()> {
|
||||
let data = packet.encode_to_vec();
|
||||
let mut buffer = vec![0u8; 6];
|
||||
let length = data.len() as u32;
|
||||
// magic bytes
|
||||
buffer[0] = 0xff;
|
||||
buffer[1] = 0xff;
|
||||
// little endian u32 for message size
|
||||
buffer[2] = length as u8;
|
||||
buffer[3] = (length << 8) as u8;
|
||||
buffer[4] = (length << 16) as u8;
|
||||
buffer[5] = (length << 24) as u8;
|
||||
buffer.extend_from_slice(&data);
|
||||
self.tx_raw_sender.send((domid, buffer)).await?;
|
||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket {
|
||||
from: 0,
|
||||
to: domid,
|
||||
packet,
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn process(&mut self, buffers: &mut HashMap<u32, BytesMut>) -> Result<()> {
|
||||
loop {
|
||||
select! {
|
||||
x = self.rx_receiver.recv() => match x {
|
||||
Some((domid, data)) => {
|
||||
if let Some(data) = data {
|
||||
let buffer = buffers.entry(domid).or_insert_with_key(|_| BytesMut::new());
|
||||
buffer.extend_from_slice(&data);
|
||||
loop {
|
||||
if buffer.len() < 6 {
|
||||
break;
|
||||
}
|
||||
|
||||
if buffer[0] != 0xff || buffer[1] != 0xff {
|
||||
buffer.clear();
|
||||
break;
|
||||
}
|
||||
|
||||
let size = (buffer[2] as u32 | (buffer[3] as u32) << 8 | (buffer[4] as u32) << 16 | (buffer[5] as u32) << 24) as usize;
|
||||
let needed = size + 6;
|
||||
if buffer.len() < needed {
|
||||
break;
|
||||
}
|
||||
let mut packet = buffer.split_to(needed);
|
||||
packet.advance(6);
|
||||
match IdmTransportPacket::decode(packet) {
|
||||
Ok(packet) => {
|
||||
let _ = client_or_create(domid, &self.tx_sender, &self.clients, &self.feeds).await?;
|
||||
let guard = self.feeds.lock().await;
|
||||
if let Some(feed) = guard.get(&domid) {
|
||||
let _ = feed.try_send(packet.clone());
|
||||
}
|
||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: domid, to: 0, packet });
|
||||
}
|
||||
|
||||
Err(packet) => {
|
||||
warn!("received invalid packet from domain {}: {}", domid, packet);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut clients = self.clients.lock().await;
|
||||
let mut feeds = self.feeds.lock().await;
|
||||
clients.remove(&domid);
|
||||
feeds.remove(&domid);
|
||||
}
|
||||
self.process_rx_packet(domid, data, buffers).await?;
|
||||
},
|
||||
|
||||
None => {
|
||||
@ -187,25 +234,14 @@ impl DaemonIdm {
|
||||
},
|
||||
x = self.tx_receiver.recv() => match x {
|
||||
Some((domid, packet)) => {
|
||||
let data = packet.encode_to_vec();
|
||||
let mut buffer = vec![0u8; 6];
|
||||
let length = data.len() as u32;
|
||||
buffer[0] = 0xff;
|
||||
buffer[1] = 0xff;
|
||||
buffer[2] = length as u8;
|
||||
buffer[3] = (length << 8) as u8;
|
||||
buffer[4] = (length << 16) as u8;
|
||||
buffer[5] = (length << 24) as u8;
|
||||
buffer.extend_from_slice(&data);
|
||||
self.tx_raw_sender.send((domid, buffer)).await?;
|
||||
let _ = self.snoop_sender.send(DaemonIdmSnoopPacket { from: 0, to: domid, packet });
|
||||
self.tx_packet(domid, packet).await?;
|
||||
},
|
||||
|
||||
None => {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
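Aside (a sketch, not code from this commit): the framing described by the comments in process_rx_packet and tx_packet is a 6-byte header, two 0xff magic bytes followed by a little-endian u32 payload length, and then the protobuf-encoded IdmTransportPacket bytes. The helpers below illustrate that layout on plain byte slices.

/// Wrap an already-encoded payload in the 6-byte IDM frame header.
fn frame(payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(6 + payload.len());
    out.extend_from_slice(&[0xff, 0xff]); // magic bytes
    out.extend_from_slice(&(payload.len() as u32).to_le_bytes()); // little-endian length
    out.extend_from_slice(payload);
    out
}

/// Return the payload if a complete, well-formed frame is buffered, otherwise None.
fn unframe(buffer: &[u8]) -> Option<&[u8]> {
    if buffer.len() < 6 || buffer[0] != 0xff || buffer[1] != 0xff {
        return None;
    }
    let size = u32::from_le_bytes([buffer[2], buffer[3], buffer[4], buffer[5]]) as usize;
    buffer.get(6..6 + size)
}

fn main() {
    let framed = frame(b"hello");
    assert_eq!(unframe(&framed), Some(&b"hello"[..]));
}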
crates/daemon/src/ip/assignment.rs (new file, 179 lines)
@ -0,0 +1,179 @@
|
||||
use advmac::MacAddr6;
|
||||
use anyhow::{anyhow, Result};
|
||||
use ipnetwork::{Ipv4Network, Ipv6Network};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::{Ipv4Addr, Ipv6Addr},
|
||||
sync::Arc,
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::db::ip::{IpReservation, IpReservationStore};
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct IpAssignmentState {
|
||||
pub ipv4: HashMap<Ipv4Addr, IpReservation>,
|
||||
pub ipv6: HashMap<Ipv6Addr, IpReservation>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct IpAssignment {
|
||||
ipv4_network: Ipv4Network,
|
||||
ipv6_network: Ipv6Network,
|
||||
gateway_ipv4: Ipv4Addr,
|
||||
gateway_ipv6: Ipv6Addr,
|
||||
gateway_mac: MacAddr6,
|
||||
store: IpReservationStore,
|
||||
state: Arc<RwLock<IpAssignmentState>>,
|
||||
}
|
||||
|
||||
impl IpAssignment {
|
||||
pub async fn new(
|
||||
host_uuid: Uuid,
|
||||
ipv4_network: Ipv4Network,
|
||||
ipv6_network: Ipv6Network,
|
||||
store: IpReservationStore,
|
||||
) -> Result<Self> {
|
||||
let mut state = IpAssignment::fetch_current_state(&store).await?;
|
||||
let reservation = if let Some(reservation) = store.read(host_uuid).await? {
|
||||
reservation
|
||||
} else {
|
||||
IpAssignment::allocate(
|
||||
&mut state,
|
||||
&store,
|
||||
host_uuid,
|
||||
ipv4_network,
|
||||
ipv6_network,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
};
|
||||
let assignment = IpAssignment {
|
||||
ipv4_network,
|
||||
ipv6_network,
|
||||
gateway_ipv4: reservation.ipv4,
|
||||
gateway_ipv6: reservation.ipv6,
|
||||
gateway_mac: reservation.gateway_mac,
|
||||
store,
|
||||
state: Arc::new(RwLock::new(state)),
|
||||
};
|
||||
Ok(assignment)
|
||||
}
|
||||
|
||||
async fn fetch_current_state(store: &IpReservationStore) -> Result<IpAssignmentState> {
|
||||
let reservations = store.list().await?;
|
||||
let mut state = IpAssignmentState::default();
|
||||
for reservation in reservations.values() {
|
||||
state.ipv4.insert(reservation.ipv4, reservation.clone());
|
||||
state.ipv6.insert(reservation.ipv6, reservation.clone());
|
||||
}
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn allocate(
|
||||
state: &mut IpAssignmentState,
|
||||
store: &IpReservationStore,
|
||||
uuid: Uuid,
|
||||
ipv4_network: Ipv4Network,
|
||||
ipv6_network: Ipv6Network,
|
||||
gateway_ipv4: Option<Ipv4Addr>,
|
||||
gateway_ipv6: Option<Ipv6Addr>,
|
||||
gateway_mac: Option<MacAddr6>,
|
||||
) -> Result<IpReservation> {
|
||||
let found_ipv4: Option<Ipv4Addr> = ipv4_network
|
||||
.iter()
|
||||
.filter(|ip| {
|
||||
ip.is_private() && !(ip.is_loopback() || ip.is_multicast() || ip.is_broadcast())
|
||||
})
|
||||
.filter(|ip| {
|
||||
let last = ip.octets()[3];
|
||||
// filter for IPs ending in .1 to .250 because .250+ can have special meaning
|
||||
last > 0 && last < 250
|
||||
})
|
||||
.find(|ip| !state.ipv4.contains_key(ip));
|
||||
|
||||
let found_ipv6: Option<Ipv6Addr> = ipv6_network
|
||||
.iter()
|
||||
.filter(|ip| !ip.is_loopback() && !ip.is_multicast())
|
||||
.find(|ip| !state.ipv6.contains_key(ip));
|
||||
|
||||
let Some(ipv4) = found_ipv4 else {
|
||||
return Err(anyhow!(
|
||||
"unable to allocate ipv4 address, assigned network is exhausted"
|
||||
));
|
||||
};
|
||||
|
||||
let Some(ipv6) = found_ipv6 else {
|
||||
return Err(anyhow!(
|
||||
"unable to allocate ipv6 address, assigned network is exhausted"
|
||||
));
|
||||
};
|
||||
|
||||
let mut mac = MacAddr6::random();
|
||||
mac.set_local(false);
|
||||
mac.set_multicast(false);
|
||||
|
||||
let reservation = IpReservation {
|
||||
uuid: uuid.to_string(),
|
||||
ipv4,
|
||||
ipv6,
|
||||
mac,
|
||||
ipv4_prefix: ipv4_network.prefix(),
|
||||
ipv6_prefix: ipv6_network.prefix(),
|
||||
gateway_ipv4: gateway_ipv4.unwrap_or(ipv4),
|
||||
gateway_ipv6: gateway_ipv6.unwrap_or(ipv6),
|
||||
gateway_mac: gateway_mac.unwrap_or(mac),
|
||||
};
|
||||
state.ipv4.insert(ipv4, reservation.clone());
|
||||
state.ipv6.insert(ipv6, reservation.clone());
|
||||
store.update(uuid, reservation.clone()).await?;
|
||||
Ok(reservation)
|
||||
}
|
||||
|
||||
pub async fn assign(&self, uuid: Uuid) -> Result<IpReservation> {
|
||||
let mut state = self.state.write().await;
|
||||
let reservation = IpAssignment::allocate(
|
||||
&mut state,
|
||||
&self.store,
|
||||
uuid,
|
||||
self.ipv4_network,
|
||||
self.ipv6_network,
|
||||
Some(self.gateway_ipv4),
|
||||
Some(self.gateway_ipv6),
|
||||
Some(self.gateway_mac),
|
||||
)
|
||||
.await?;
|
||||
Ok(reservation)
|
||||
}
|
||||
|
||||
pub async fn recall(&self, uuid: Uuid) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
self.store.remove(uuid).await?;
|
||||
state
|
||||
.ipv4
|
||||
.retain(|_, reservation| reservation.uuid != uuid.to_string());
|
||||
state
|
||||
.ipv6
|
||||
.retain(|_, reservation| reservation.uuid != uuid.to_string());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn retrieve(&self, uuid: Uuid) -> Result<Option<IpReservation>> {
|
||||
self.store.read(uuid).await
|
||||
}
|
||||
|
||||
pub async fn reload(&self) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
let intermediate = IpAssignment::fetch_current_state(&self.store).await?;
|
||||
*state = intermediate;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn read(&self) -> Result<IpAssignmentState> {
|
||||
Ok(self.state.read().await.clone())
|
||||
}
|
||||
}
|
crates/daemon/src/ip/mod.rs (new file, 1 line)
@@ -0,0 +1 @@
pub mod assignment;
@ -1,18 +1,22 @@
|
||||
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
|
||||
|
||||
use crate::db::ip::IpReservationStore;
|
||||
use crate::db::zone::ZoneStore;
|
||||
use crate::db::KrataDatabase;
|
||||
use crate::ip::assignment::IpAssignment;
|
||||
use anyhow::{anyhow, Result};
|
||||
use config::DaemonConfig;
|
||||
use console::{DaemonConsole, DaemonConsoleHandle};
|
||||
use control::DaemonControlService;
|
||||
use db::ZoneStore;
|
||||
use devices::DaemonDeviceManager;
|
||||
use event::{DaemonEventContext, DaemonEventGenerator};
|
||||
use idm::{DaemonIdm, DaemonIdmHandle};
|
||||
use ipnetwork::{Ipv4Network, Ipv6Network};
|
||||
use krata::{dial::ControlDialAddress, v1::control::control_service_server::ControlServiceServer};
|
||||
use krataoci::{packer::service::OciPackerService, registry::OciPlatform};
|
||||
use kratart::Runtime;
|
||||
use log::{debug, info};
|
||||
use reconcile::zone::ZoneReconciler;
|
||||
use std::path::Path;
|
||||
use std::{net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc};
|
||||
use tokio::{
|
||||
fs,
|
||||
net::UnixListener,
|
||||
@ -32,6 +36,7 @@ pub mod db;
|
||||
pub mod devices;
|
||||
pub mod event;
|
||||
pub mod idm;
|
||||
pub mod ip;
|
||||
pub mod metrics;
|
||||
pub mod oci;
|
||||
pub mod reconcile;
|
||||
@ -101,31 +106,33 @@ impl Daemon {
|
||||
debug!("initializing caches and hydrating zone state");
|
||||
let seed = config.oci.seed.clone().map(PathBuf::from);
|
||||
let packer = OciPackerService::new(seed, &image_cache_dir, OciPlatform::current()).await?;
|
||||
let glt = ZoneLookupTable::new(0, host_uuid);
|
||||
let zones_db_path = format!("{}/zones.db", store);
|
||||
let zones = ZoneStore::open(&PathBuf::from(zones_db_path))?;
|
||||
debug!("initializing core runtime");
|
||||
let runtime = Runtime::new().await?;
|
||||
let zlt = ZoneLookupTable::new(0, host_uuid);
|
||||
let db_path = format!("{}/krata.db", store);
|
||||
let database = KrataDatabase::open(Path::new(&db_path))?;
|
||||
let zones = ZoneStore::open(database.clone())?;
|
||||
let (zone_reconciler_notify, zone_reconciler_receiver) =
|
||||
channel::<Uuid>(ZONE_RECONCILER_QUEUE_LEN);
|
||||
|
||||
debug!("initializing core runtime");
|
||||
let runtime = Runtime::new(host_uuid).await?;
|
||||
|
||||
debug!("starting IDM service");
|
||||
let idm = DaemonIdm::new(glt.clone()).await?;
|
||||
let idm = DaemonIdm::new(zlt.clone()).await?;
|
||||
let idm = idm.launch().await?;
|
||||
|
||||
debug!("initializing console interfaces");
|
||||
let console = DaemonConsole::new(glt.clone()).await?;
|
||||
let console = DaemonConsole::new(zlt.clone()).await?;
|
||||
let console = console.launch().await?;
|
||||
|
||||
debug!("initializing zone reconciler");
|
||||
let (events, generator) =
|
||||
DaemonEventGenerator::new(zones.clone(), zone_reconciler_notify.clone(), idm.clone())
|
||||
.await?;
|
||||
let runtime_for_reconciler = runtime.dupe().await?;
|
||||
let ipv4_network = Ipv4Network::from_str(&config.network.ipv4.subnet)?;
|
||||
let ipv6_network = Ipv6Network::from_str(&config.network.ipv6.subnet)?;
|
||||
let ip_reservation_store = IpReservationStore::open(database)?;
|
||||
let ip_assignment =
|
||||
IpAssignment::new(host_uuid, ipv4_network, ipv6_network, ip_reservation_store).await?;
|
||||
debug!("initializing zone reconciler");
|
||||
let zone_reconciler = ZoneReconciler::new(
|
||||
devices.clone(),
|
||||
glt.clone(),
|
||||
zlt.clone(),
|
||||
zones.clone(),
|
||||
events.clone(),
|
||||
runtime_for_reconciler,
|
||||
@ -134,6 +141,8 @@ impl Daemon {
|
||||
kernel_path,
|
||||
initrd_path,
|
||||
addons_path,
|
||||
ip_assignment,
|
||||
config.clone(),
|
||||
)?;
|
||||
|
||||
let zone_reconciler_task = zone_reconciler.launch(zone_reconciler_receiver).await?;
|
||||
@ -152,7 +161,7 @@ impl Daemon {
|
||||
Ok(Self {
|
||||
store,
|
||||
_config: config,
|
||||
glt,
|
||||
glt: zlt,
|
||||
devices,
|
||||
zones,
|
||||
events,
|
||||
@ -167,7 +176,7 @@ impl Daemon {
|
||||
}
|
||||
|
||||
pub async fn listen(&mut self, addr: ControlDialAddress) -> Result<()> {
|
||||
debug!("starting API service");
|
||||
debug!("starting control service");
|
||||
let control_service = DaemonControlService::new(
|
||||
self.glt.clone(),
|
||||
self.devices.clone(),
|
||||
|
@ -1,41 +1,41 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use futures::StreamExt;
|
||||
use krata::launchcfg::LaunchPackedFormat;
|
||||
use krata::v1::common::ZoneOciImageSpec;
|
||||
use krata::v1::common::{OciImageFormat, Zone, ZoneState, ZoneStatus};
|
||||
use krataoci::packer::{service::OciPackerService, OciPackedFormat};
|
||||
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy};
|
||||
use kratart::launch::{PciBdf, PciDevice, PciRdmReservePolicy, ZoneLaunchNetwork};
|
||||
use kratart::{launch::ZoneLaunchRequest, Runtime};
|
||||
use log::info;
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use crate::config::DaemonPciDeviceRdmReservePolicy;
|
||||
use crate::config::{DaemonConfig, DaemonPciDeviceRdmReservePolicy};
|
||||
use crate::devices::DaemonDeviceManager;
|
||||
use crate::{
|
||||
reconcile::zone::{zoneinfo_to_networkstate, ZoneReconcilerResult},
|
||||
zlt::ZoneLookupTable,
|
||||
};
|
||||
use crate::ip::assignment::IpAssignment;
|
||||
use crate::reconcile::zone::ip_reservation_to_network_status;
|
||||
use crate::{reconcile::zone::ZoneReconcilerResult, zlt::ZoneLookupTable};
|
||||
use krata::v1::common::zone_image_spec::Image;
|
||||
use tokio::fs::{self, File};
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio_tar::Archive;
|
||||
use uuid::Uuid;
|
||||
|
||||
pub struct ZoneStarter<'a> {
|
||||
pub struct ZoneCreator<'a> {
|
||||
pub devices: &'a DaemonDeviceManager,
|
||||
pub kernel_path: &'a Path,
|
||||
pub initrd_path: &'a Path,
|
||||
pub addons_path: &'a Path,
|
||||
pub packer: &'a OciPackerService,
|
||||
pub glt: &'a ZoneLookupTable,
|
||||
pub ip_assignment: &'a IpAssignment,
|
||||
pub zlt: &'a ZoneLookupTable,
|
||||
pub runtime: &'a Runtime,
|
||||
pub config: &'a DaemonConfig,
|
||||
}
|
||||
|
||||
impl ZoneStarter<'_> {
|
||||
impl ZoneCreator<'_> {
|
||||
pub async fn oci_spec_tar_read_file(
|
||||
&self,
|
||||
file: &Path,
|
||||
@ -75,7 +75,7 @@ impl ZoneStarter<'_> {
|
||||
))
|
||||
}
|
||||
|
||||
pub async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||
pub async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||
let Some(ref spec) = zone.spec else {
|
||||
return Err(anyhow!("zone spec not specified"));
|
||||
};
|
||||
@ -174,6 +174,8 @@ impl ZoneStarter<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
let reservation = self.ip_assignment.assign(uuid).await?;
|
||||
|
||||
let info = self
|
||||
.runtime
|
||||
.launch(ZoneLaunchRequest {
|
||||
@ -187,7 +189,7 @@ impl ZoneStarter<'_> {
|
||||
image,
|
||||
kernel,
|
||||
initrd,
|
||||
vcpus: spec.vcpus,
|
||||
vcpus: spec.cpus,
|
||||
mem: spec.mem,
|
||||
pcis,
|
||||
env: task
|
||||
@ -198,16 +200,26 @@ impl ZoneStarter<'_> {
|
||||
run: empty_vec_optional(task.command.clone()),
|
||||
debug: false,
|
||||
addons_image: Some(self.addons_path.to_path_buf()),
|
||||
network: ZoneLaunchNetwork {
|
||||
ipv4: reservation.ipv4.to_string(),
|
||||
ipv4_prefix: reservation.ipv4_prefix,
|
||||
ipv6: reservation.ipv6.to_string(),
|
||||
ipv6_prefix: reservation.ipv6_prefix,
|
||||
gateway_ipv4: reservation.gateway_ipv4.to_string(),
|
||||
gateway_ipv6: reservation.gateway_ipv6.to_string(),
|
||||
zone_mac: reservation.mac,
|
||||
nameservers: self.config.network.nameservers.clone(),
|
||||
},
|
||||
})
|
||||
.await?;
|
||||
self.glt.associate(uuid, info.domid).await;
|
||||
info!("started zone {}", uuid);
|
||||
zone.state = Some(ZoneState {
|
||||
status: ZoneStatus::Started.into(),
|
||||
network: Some(zoneinfo_to_networkstate(&info)),
|
||||
exit_info: None,
|
||||
error_info: None,
|
||||
host: self.glt.host_uuid().to_string(),
|
||||
self.zlt.associate(uuid, info.domid).await;
|
||||
info!("created zone {}", uuid);
|
||||
zone.status = Some(ZoneStatus {
|
||||
state: ZoneState::Created.into(),
|
||||
network_status: Some(ip_reservation_to_network_status(&reservation)),
|
||||
exit_status: None,
|
||||
error_status: None,
|
||||
host: self.zlt.host_uuid().to_string(),
|
||||
domid: info.domid,
|
||||
});
|
||||
success.store(true, Ordering::Release);
|
@ -5,13 +5,23 @@ use std::{
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use self::create::ZoneCreator;
|
||||
use crate::config::DaemonConfig;
|
||||
use crate::db::ip::IpReservation;
|
||||
use crate::ip::assignment::IpAssignment;
|
||||
use crate::{
|
||||
db::zone::ZoneStore,
|
||||
devices::DaemonDeviceManager,
|
||||
event::{DaemonEvent, DaemonEventContext},
|
||||
zlt::ZoneLookupTable,
|
||||
};
|
||||
use anyhow::Result;
|
||||
use krata::v1::{
|
||||
common::{Zone, ZoneErrorInfo, ZoneExitInfo, ZoneNetworkState, ZoneState, ZoneStatus},
|
||||
common::{Zone, ZoneErrorStatus, ZoneExitStatus, ZoneNetworkStatus, ZoneState, ZoneStatus},
|
||||
control::ZoneChangedEvent,
|
||||
};
|
||||
use krataoci::packer::service::OciPackerService;
|
||||
use kratart::{Runtime, ZoneInfo};
|
||||
use kratart::Runtime;
|
||||
use log::{error, info, trace, warn};
|
||||
use tokio::{
|
||||
select,
|
||||
@ -24,16 +34,7 @@ use tokio::{
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
db::ZoneStore,
|
||||
devices::DaemonDeviceManager,
|
||||
event::{DaemonEvent, DaemonEventContext},
|
||||
zlt::ZoneLookupTable,
|
||||
};
|
||||
|
||||
use self::start::ZoneStarter;
|
||||
|
||||
mod start;
|
||||
mod create;
|
||||
|
||||
const PARALLEL_LIMIT: u32 = 5;
|
||||
|
||||
@ -68,6 +69,8 @@ pub struct ZoneReconciler {
|
||||
tasks: Arc<Mutex<HashMap<Uuid, ZoneReconcilerEntry>>>,
|
||||
zone_reconciler_notify: Sender<Uuid>,
|
||||
zone_reconcile_lock: Arc<RwLock<()>>,
|
||||
ip_assignment: IpAssignment,
|
||||
config: Arc<DaemonConfig>,
|
||||
}
|
||||
|
||||
impl ZoneReconciler {
|
||||
@ -83,6 +86,8 @@ impl ZoneReconciler {
|
||||
kernel_path: PathBuf,
|
||||
initrd_path: PathBuf,
|
||||
modules_path: PathBuf,
|
||||
ip_assignment: IpAssignment,
|
||||
config: Arc<DaemonConfig>,
|
||||
) -> Result<Self> {
|
||||
Ok(Self {
|
||||
devices,
|
||||
@ -97,6 +102,8 @@ impl ZoneReconciler {
|
||||
tasks: Arc::new(Mutex::new(HashMap::new())),
|
||||
zone_reconciler_notify,
|
||||
zone_reconcile_lock: Arc::new(RwLock::with_max_readers((), PARALLEL_LIMIT)),
|
||||
ip_assignment,
|
||||
config,
|
||||
})
|
||||
}
|
||||
|
||||
@ -132,7 +139,7 @@ impl ZoneReconciler {
|
||||
error!("runtime reconciler failed: {}", error);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}))
|
||||
}
|
||||
@ -166,21 +173,21 @@ impl ZoneReconciler {
|
||||
let runtime_zone = runtime_zones.iter().find(|x| x.uuid == uuid);
|
||||
match runtime_zone {
|
||||
None => {
|
||||
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
||||
if state.status() == ZoneStatus::Started {
|
||||
state.status = ZoneStatus::Starting.into();
|
||||
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
|
||||
if status.state() == ZoneState::Created {
|
||||
status.state = ZoneState::Creating.into();
|
||||
}
|
||||
stored_zone.state = Some(state);
|
||||
stored_zone.status = Some(status);
|
||||
}
|
||||
|
||||
Some(runtime) => {
|
||||
self.zlt.associate(uuid, runtime.domid).await;
|
||||
let mut state = stored_zone.state.as_mut().cloned().unwrap_or_default();
|
||||
let mut status = stored_zone.status.as_mut().cloned().unwrap_or_default();
|
||||
if let Some(code) = runtime.state.exit_code {
|
||||
state.status = ZoneStatus::Exited.into();
|
||||
state.exit_info = Some(ZoneExitInfo { code });
|
||||
status.state = ZoneState::Exited.into();
|
||||
status.exit_status = Some(ZoneExitStatus { code });
|
||||
} else {
|
||||
state.status = ZoneStatus::Started.into();
|
||||
status.state = ZoneState::Created.into();
|
||||
}
|
||||
|
||||
for device in &stored_zone
|
||||
@ -193,8 +200,11 @@ impl ZoneReconciler {
|
||||
device_claims.insert(device.name.clone(), uuid);
|
||||
}
|
||||
|
||||
state.network = Some(zoneinfo_to_networkstate(runtime));
|
||||
stored_zone.state = Some(state);
|
||||
if let Some(reservation) = self.ip_assignment.retrieve(uuid).await? {
|
||||
status.network_status =
|
||||
Some(ip_reservation_to_network_status(&reservation));
|
||||
}
|
||||
stored_zone.status = Some(status);
|
||||
}
|
||||
}
|
||||
|
||||
@ -228,20 +238,20 @@ impl ZoneReconciler {
|
||||
zone: Some(zone.clone()),
|
||||
}))?;
|
||||
|
||||
let start_status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||
let result = match start_status {
|
||||
ZoneStatus::Starting => self.start(uuid, &mut zone).await,
|
||||
ZoneStatus::Exited => self.exited(&mut zone).await,
|
||||
ZoneStatus::Destroying => self.destroy(uuid, &mut zone).await,
|
||||
let start_state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
|
||||
let result = match start_state {
|
||||
ZoneState::Creating => self.create(uuid, &mut zone).await,
|
||||
ZoneState::Exited => self.exited(&mut zone).await,
|
||||
ZoneState::Destroying => self.destroy(uuid, &mut zone).await,
|
||||
_ => Ok(ZoneReconcilerResult::Unchanged),
|
||||
};
|
||||
|
||||
let result = match result {
|
||||
Ok(result) => result,
|
||||
Err(error) => {
|
||||
zone.state = Some(zone.state.as_mut().cloned().unwrap_or_default());
|
||||
zone.state.as_mut().unwrap().status = ZoneStatus::Failed.into();
|
||||
zone.state.as_mut().unwrap().error_info = Some(ZoneErrorInfo {
|
||||
zone.status = Some(zone.status.as_mut().cloned().unwrap_or_default());
|
||||
zone.status.as_mut().unwrap().state = ZoneState::Failed.into();
|
||||
zone.status.as_mut().unwrap().error_status = Some(ZoneErrorStatus {
|
||||
message: error.to_string(),
|
||||
});
|
||||
warn!("failed to start zone {}: {}", zone.id, error);
|
||||
@ -251,8 +261,8 @@ impl ZoneReconciler {
|
||||
|
||||
info!("reconciled zone {}", uuid);
|
||||
|
||||
let status = zone.state.as_ref().map(|x| x.status()).unwrap_or_default();
|
||||
let destroyed = status == ZoneStatus::Destroyed;
|
||||
let state = zone.status.as_ref().map(|x| x.state()).unwrap_or_default();
|
||||
let destroyed = state == ZoneState::Destroyed;
|
||||
|
||||
let rerun = if let ZoneReconcilerResult::Changed { rerun } = result {
|
||||
let event = DaemonEvent::ZoneChanged(ZoneChangedEvent {
|
||||
@ -276,22 +286,24 @@ impl ZoneReconciler {
|
||||
Ok(rerun)
|
||||
}
|
||||
|
||||
async fn start(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||
let starter = ZoneStarter {
|
||||
async fn create(&self, uuid: Uuid, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||
let starter = ZoneCreator {
|
||||
devices: &self.devices,
|
||||
kernel_path: &self.kernel_path,
|
||||
initrd_path: &self.initrd_path,
|
||||
addons_path: &self.addons_path,
|
||||
packer: &self.packer,
|
||||
glt: &self.zlt,
|
||||
ip_assignment: &self.ip_assignment,
|
||||
zlt: &self.zlt,
|
||||
runtime: &self.runtime,
|
||||
config: &self.config,
|
||||
};
|
||||
starter.start(uuid, zone).await
|
||||
starter.create(uuid, zone).await
|
||||
}
|
||||
|
||||
async fn exited(&self, zone: &mut Zone) -> Result<ZoneReconcilerResult> {
|
||||
if let Some(ref mut state) = zone.state {
|
||||
state.set_status(ZoneStatus::Destroying);
|
||||
if let Some(ref mut status) = zone.status {
|
||||
status.set_state(ZoneState::Destroying);
|
||||
Ok(ZoneReconcilerResult::Changed { rerun: true })
|
||||
} else {
|
||||
Ok(ZoneReconcilerResult::Unchanged)
|
||||
@ -303,18 +315,19 @@ impl ZoneReconciler {
|
||||
trace!("failed to destroy runtime zone {}: {}", uuid, error);
|
||||
}
|
||||
|
||||
let domid = zone.state.as_ref().map(|x| x.domid);
|
||||
let domid = zone.status.as_ref().map(|x| x.domid);
|
||||
|
||||
if let Some(domid) = domid {
|
||||
self.zlt.remove(uuid, domid).await;
|
||||
}
|
||||
|
||||
info!("destroyed zone {}", uuid);
|
||||
zone.state = Some(ZoneState {
|
||||
status: ZoneStatus::Destroyed.into(),
|
||||
network: None,
|
||||
exit_info: None,
|
||||
error_info: None,
|
||||
self.ip_assignment.recall(uuid).await?;
|
||||
zone.status = Some(ZoneStatus {
|
||||
state: ZoneState::Destroyed.into(),
|
||||
network_status: None,
|
||||
exit_status: None,
|
||||
error_status: None,
|
||||
host: self.zlt.host_uuid().to_string(),
|
||||
domid: domid.unwrap_or(u32::MAX),
|
||||
});
|
||||
@ -362,13 +375,13 @@ impl ZoneReconciler {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn zoneinfo_to_networkstate(info: &ZoneInfo) -> ZoneNetworkState {
|
||||
ZoneNetworkState {
|
||||
zone_ipv4: info.zone_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
||||
zone_ipv6: info.zone_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
||||
zone_mac: info.zone_mac.as_ref().cloned().unwrap_or_default(),
|
||||
gateway_ipv4: info.gateway_ipv4.map(|x| x.to_string()).unwrap_or_default(),
|
||||
gateway_ipv6: info.gateway_ipv6.map(|x| x.to_string()).unwrap_or_default(),
|
||||
gateway_mac: info.gateway_mac.as_ref().cloned().unwrap_or_default(),
|
||||
pub fn ip_reservation_to_network_status(ip: &IpReservation) -> ZoneNetworkStatus {
|
||||
ZoneNetworkStatus {
|
||||
zone_ipv4: format!("{}/{}", ip.ipv4, ip.ipv4_prefix),
|
||||
zone_ipv6: format!("{}/{}", ip.ipv6, ip.ipv6_prefix),
|
||||
zone_mac: ip.mac.to_string().replace('-', ":"),
|
||||
gateway_ipv4: format!("{}/{}", ip.gateway_ipv4, ip.ipv4_prefix),
|
||||
gateway_ipv6: format!("{}/{}", ip.gateway_ipv6, ip.ipv6_prefix),
|
||||
gateway_mac: ip.gateway_mac.to_string().replace('-', ":"),
|
||||
}
|
||||
}
|
||||
|