Compare commits


18 Commits

13 changed files with 1278 additions and 508 deletions

.gitignore (vendored): 8 changes

@@ -1,8 +1,12 @@
/target
/dls
/modd.conf
/m1_bootstrap-config
/config.yaml
/dist
/dkl
/tmp
/config.yaml
# dls assets
/cluster_*_*
/host_*_*

Cargo.lock (generated): 835 changes (diff suppressed because it is too large)

Cargo.toml

@@ -12,17 +12,23 @@ codegen-units = 1
[dependencies]
async-compression = { version = "0.4.27", features = ["tokio", "zstd"] }
base32 = "0.5.1"
bytes = "1.10.1"
chrono = { version = "0.4.41", default-features = false, features = ["now"] }
chrono = { version = "0.4.41", default-features = false, features = ["clock", "now"] }
clap = { version = "4.5.40", features = ["derive", "env"] }
clap_complete = { version = "4.5.54", features = ["unstable-dynamic"] }
env_logger = "0.11.8"
eyre = "0.6.12"
futures = "0.3.31"
futures-util = "0.3.31"
glob = "0.3.2"
hex = "0.4.3"
log = "0.4.27"
lz4 = "1.28.1"
nix = { version = "0.30.1", features = ["user"] }
openssl = "0.10.73"
page_size = "0.6.0"
reqwest = { version = "0.12.20", features = ["json", "stream"] }
reqwest = { version = "0.12.20", features = ["json", "stream", "native-tls"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
serde_yaml = "0.9.34"


@@ -24,21 +24,46 @@ enum Command {
prefix: String,
},
Logger {
#[arg(long, short = 'p', default_value = "/var/log")]
/// Path where the logs are stored
#[arg(long, short = 'p', default_value = "/var/log", env = "DKL_LOG_PATH")]
log_path: String,
/// Name of the log instead of the command's basename
#[arg(long, short = 'n')]
log_name: Option<String>,
/// Prefix log lines with time & stream
#[arg(long)]
with_prefix: bool,
command: String,
args: Vec<String>,
},
Log {
#[arg(long, short = 'p', default_value = "/var/log")]
/// Path where the logs are stored
#[arg(long, short = 'p', default_value = "/var/log", env = "DKL_LOG_PATH")]
log_path: String,
/// Name of the log set to operate on.
log_name: String,
since: Option<String>,
until: Option<String>,
#[command(subcommand)]
op: LogOp,
},
Dynlay {
layer: String,
version: String,
#[arg(
long,
short = 'u',
default_value = "https://dkl.novit.io/dist/layers",
env = "DKL_DYNLAY_URL"
)]
url_prefix: String,
#[arg(
long,
short = 'd',
default_value = "/opt/dynlay",
env = "DKL_DYNLAY_DIR"
)]
layers_dir: String,
#[arg(long, default_value = "/")]
chroot: std::path::PathBuf,
},
}
@@ -84,33 +109,118 @@ async fn main() -> Result<()> {
C::Log {
log_path,
log_name,
since,
until,
op,
} => op.run(&log_path, &log_name).await,
C::Dynlay {
ref layer,
ref version,
ref url_prefix,
ref layers_dir,
chroot,
} => {
let since = parse_ts_arg(since)?;
let until = parse_ts_arg(until)?;
let mut files = dkl::logger::log_files(&log_path, &log_name).await?;
files.sort();
let mut out = tokio::io::stdout();
for f in files {
if !since.is_none_or(|since| f.timestamp >= since) {
continue;
}
if !until.is_none_or(|until| f.timestamp <= until) {
continue;
}
debug!("{f:?}");
f.copy_to(&mut out).await?;
dkl::dynlay::Dynlay {
url_prefix,
layers_dir,
chroot,
}
Ok(())
.install(layer, version)
.await
}
}
}
async fn apply_config(config_file: &str, filters: &[glob::Pattern], chroot: &str) -> Result<()> {
let config = fs::read_to_string(config_file).await?;
let config: dkl::Config = serde_yaml::from_str(&config)?;
let files = if filters.is_empty() {
config.files
} else {
(config.files.into_iter())
.filter(|f| filters.iter().any(|filter| filter.matches(&f.path)))
.collect()
};
dkl::apply::files(&files, chroot).await
}
#[derive(Subcommand)]
enum LogOp {
Ls {
#[arg(short = 'l', long)]
detail: bool,
},
Cleanup {
/// Days of logs to keep
days: u64,
},
Cat {
/// print logs >= since
since: Option<String>,
/// print logs <= until
until: Option<String>,
},
}
impl LogOp {
async fn run(self, log_path: &str, log_name: &str) -> Result<()> {
let mut files = dkl::logger::log_files(&log_path, &log_name).await?;
files.sort();
use LogOp as Op;
match self {
Op::Ls { detail } => {
for f in files {
let path = f.path.to_string_lossy();
if detail {
println!("{ts} {path}", ts = f.timestamp);
} else {
println!("{path}");
}
}
}
Op::Cleanup { days } => {
let deadline = chrono::Utc::now() - chrono::Days::new(days);
let deadline = dkl::logger::trunc_ts(deadline);
debug!("cleanup {log_name} logs < {deadline}");
for f in files {
if f.timestamp < deadline {
debug!("removing {}", f.path.to_string_lossy());
fs::remove_file(f.path).await?;
}
}
}
Op::Cat { since, until } => {
let since = parse_ts_arg(since)?;
let until = parse_ts_arg(until)?;
let mut out = tokio::io::stdout();
for f in files {
if !since.is_none_or(|since| f.timestamp >= since) {
continue;
}
if !until.is_none_or(|until| f.timestamp <= until) {
continue;
}
debug!(
"cat {path} (timestamp={ts}, compressed={comp})",
path = f.path.to_string_lossy(),
ts = f.timestamp.to_rfc3339(),
comp = f.compressed
);
if let Err(e) = f.copy_to(&mut out).await {
error!("{file}: {e}", file = f.path.to_string_lossy());
}
}
}
}
Ok(())
}
}
fn parse_ts_arg(ts: Option<String>) -> Result<Option<dkl::logger::Timestamp>> {
match ts {
None => Ok(None),
@@ -126,21 +236,6 @@ fn basename(path: &str) -> &str {
path.rsplit_once('/').map_or(path, |split| split.1)
}
async fn apply_config(config_file: &str, filters: &[glob::Pattern], chroot: &str) -> Result<()> {
let config = fs::read_to_string(config_file).await?;
let config: dkl::Config = serde_yaml::from_str(&config)?;
let files = if filters.is_empty() {
config.files
} else {
(config.files.into_iter())
.filter(|f| filters.iter().any(|filter| filter.matches(&f.path)))
.collect()
};
dkl::apply::files(&files, chroot).await
}
fn parse_globs(filters: &[String]) -> Result<Vec<glob::Pattern>> {
let mut errors = false;
let filters = (filters.iter())


@@ -1,14 +1,18 @@
use bytes::Bytes;
use clap::{CommandFactory, Parser, Subcommand};
use eyre::{Result, format_err};
use eyre::format_err;
use futures_util::Stream;
use futures_util::StreamExt;
use tokio::io::AsyncWriteExt;
use std::time::{Duration, SystemTime};
use tokio::fs;
use tokio::io::{AsyncWrite, AsyncWriteExt};
use dkl::dls;
#[derive(Parser)]
#[command()]
struct Cli {
#[arg(long, default_value = "http://[::1]:7606")]
#[arg(long, default_value = "http://[::1]:7606", env = "DLS_URL")]
dls: String,
#[command(subcommand)]
@@ -25,9 +29,36 @@ enum Command {
},
Hosts,
Host {
#[arg(short = 'o', long)]
out: Option<String>,
host: String,
asset: Option<String>,
},
#[command(subcommand)]
DlSet(DlSet),
}
#[derive(Subcommand)]
enum DlSet {
Sign {
#[arg(short = 'e', long, default_value = "1d")]
expiry: String,
#[arg(value_parser = parse_download_set_item)]
items: Vec<dls::DownloadSetItem>,
},
Show {
#[arg(env = "DLS_DLSET")]
signed_set: String,
},
Fetch {
#[arg(long, env = "DLS_DLSET")]
signed_set: String,
#[arg(short = 'o', long)]
out: Option<String>,
kind: String,
name: String,
asset: String,
},
}
#[derive(Subcommand)]
@@ -62,7 +93,7 @@ enum ClusterCommand {
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<()> {
async fn main() -> eyre::Result<()> {
clap_complete::CompleteEnv::with_factory(Cli::command).complete();
let cli = Cli::parse();
@@ -125,44 +156,110 @@ async fn main() -> Result<()> {
}
}
C::Hosts => write_json(&dls.hosts().await?),
C::Host { host, asset } => {
C::Host { out, host, asset } => {
let host_name = host.clone();
let host = dls.host(host);
match asset {
None => write_json(&host.config().await?),
Some(asset) => {
let mut stream = host.asset(&asset).await?;
let out_path = format!("{host_name}_{asset}");
eprintln!("writing {host_name} asset {asset} to {out_path}");
let out = tokio::fs::File::options()
.mode(0o600)
.write(true)
.create(true)
.truncate(true)
.open(out_path)
.await?;
let mut out = tokio::io::BufWriter::new(out);
let mut n = 0u64;
while let Some(chunk) = stream.next().await {
let chunk = chunk?;
n += chunk.len() as u64;
eprint!("wrote {n} bytes\r");
out.write_all(&chunk).await?;
}
eprintln!();
out.flush().await?;
let stream = host.asset(&asset).await?;
let mut out = create_asset_file(out, "host", &host_name, &asset).await?;
copy_stream(stream, &mut out).await?;
}
}
}
C::DlSet(set) => match set {
DlSet::Sign { expiry, items } => {
let req = dls::DownloadSetReq { expiry, items };
let signed = dls.sign_dl_set(&req).await?;
println!("{signed}");
}
DlSet::Show { signed_set } => {
let raw = base32::decode(base32::Alphabet::Rfc4648 { padding: false }, &signed_set)
.ok_or(format_err!("invalid dlset"))?;
let sig_len = raw[0] as usize;
let (sig, data) = raw[1..].split_at(sig_len);
println!("signature: {}...", hex::encode(&sig[..16]));
let data = lz4::Decoder::new(data)?;
let data = std::io::read_to_string(data)?;
let (expiry, items) = data.split_once('|').ok_or(format_err!("invalid dlset"))?;
let expiry = i64::from_str_radix(expiry, 16)?;
let expiry = chrono::DateTime::from_timestamp(expiry, 0).unwrap();
println!("expires on {expiry}");
for item in items.split('|') {
let mut parts = item.split(':');
let Some(kind) = parts.next() else {
continue;
};
let Some(name) = parts.next() else {
continue;
};
for asset in parts {
println!("- {kind} {name} {asset}");
}
}
}
DlSet::Fetch {
signed_set,
out,
kind,
name,
asset,
} => {
let stream = dls.fetch_dl_set(&signed_set, &kind, &name, &asset).await?;
let mut out = create_asset_file(out, &kind, &name, &asset).await?;
copy_stream(stream, &mut out).await?;
}
},
};
Ok(())
}
async fn create_asset_file(
path: Option<String>,
kind: &str,
name: &str,
asset: &str,
) -> std::io::Result<fs::File> {
let path = &path.unwrap_or(format!("{kind}_{name}_{asset}"));
eprintln!("writing {kind} {name} asset {asset} to {path}");
(fs::File::options().write(true).create(true).truncate(true))
.mode(0o600)
.open(path)
.await
}
async fn copy_stream(
mut stream: impl Stream<Item = reqwest::Result<Bytes>> + Unpin,
out: &mut (impl AsyncWrite + Unpin),
) -> std::io::Result<()> {
let mut out = tokio::io::BufWriter::new(out);
let info_delay = Duration::from_secs(1);
let mut ts = SystemTime::now();
let mut n = 0u64;
while let Some(chunk) = stream.next().await {
let chunk = chunk.map_err(|e| std::io::Error::other(e))?;
n += chunk.len() as u64;
out.write_all(&chunk).await?;
if ts.elapsed().is_ok_and(|t| t >= info_delay) {
eprint!("wrote {n} bytes\r");
ts = SystemTime::now();
}
}
eprintln!("wrote {n} bytes");
out.flush().await
}
fn write_json<T: serde::ser::Serialize>(v: &T) {
let data = serde_json::to_string_pretty(v).expect("value should serialize to json");
println!("{data}");
@@ -172,6 +269,20 @@ fn write_raw(raw: &[u8]) {
use std::io::Write;
let mut out = std::io::stdout();
out.write(raw).expect("stdout write");
out.write_all(raw).expect("stdout write");
out.flush().expect("stdout flush");
}
fn parse_download_set_item(s: &str) -> Result<dls::DownloadSetItem, std::io::Error> {
let err = |s: &str| std::io::Error::other(s);
let mut parts = s.split(':');
let item = dls::DownloadSetItem {
kind: parts.next().ok_or(err("no kind"))?.to_string(),
name: parts.next().ok_or(err("no name"))?.to_string(),
assets: parts.map(|p| p.to_string()).collect(),
};
Ok(item)
}
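The Show decoder above implies the signed set's layout: one signature-length byte, the signature, then an LZ4-compressed "expiry-hex|kind:name:asset[:asset...]|..." payload, all base32-encoded (RFC 4648, no padding). As a reading aid, a sketch of the inverse encoding; the signature is an opaque input here, since real sets are produced and signed by the dls server:

use std::io::Write;

// Sketch: re-assembling a download set in the layout that `dl-set show`
// decodes: [sig_len u8][sig][lz4("expiry_hex|kind:name:assets...")],
// base32 RFC 4648 without padding.
fn encode_dl_set(
    sig: &[u8],
    expiry_unix: i64,
    items: &[(&str, &str, &[&str])],
) -> std::io::Result<String> {
    // payload: "<expiry hex>|kind:name:asset[:asset...]|..."
    let mut payload = format!("{expiry_unix:x}");
    for (kind, name, assets) in items {
        payload.push_str(&format!("|{kind}:{name}:{}", assets.join(":")));
    }
    // lz4-compress the payload
    let mut enc = lz4::EncoderBuilder::new().build(Vec::new())?;
    enc.write_all(payload.as_bytes())?;
    let (data, result) = enc.finish();
    result?;
    // [sig_len][sig][compressed payload]
    let mut raw = vec![sig.len() as u8];
    raw.extend_from_slice(sig);
    raw.extend_from_slice(&data);
    Ok(base32::encode(base32::Alphabet::Rfc4648 { padding: false }, &raw))
}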

src/bootstrap.rs

@@ -2,7 +2,7 @@ use std::collections::BTreeMap as Map;
pub const TAKE_ALL: i16 = -1;
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Config {
pub anti_phishing_code: String,
@@ -14,20 +14,22 @@ pub struct Config {
#[serde(skip_serializing_if = "Option::is_none")]
pub resolv_conf: Option<String>,
#[serde(default)]
#[serde(default, skip_serializing_if = "Map::is_empty")]
pub vpns: Map<String, String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub networks: Vec<Network>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub auths: Vec<Auth>,
#[serde(default)]
pub ssh: SSHServer,
#[serde(default)]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub pre_lvm_crypt: Vec<CryptDev>,
#[serde(default)]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub lvm: Vec<LvmVG>,
#[serde(default)]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub crypt: Vec<CryptDev>,
#[serde(skip_serializing_if = "Option::is_none")]
@@ -36,7 +38,30 @@ pub struct Config {
pub bootstrap: Bootstrap,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
impl Config {
pub fn new(bootstrap_dev: String) -> Self {
Self {
anti_phishing_code: "Direktil<3".into(),
keymap: None,
modules: None,
resolv_conf: None,
vpns: Map::new(),
networks: vec![],
auths: vec![],
ssh: Default::default(),
pre_lvm_crypt: vec![],
lvm: vec![],
crypt: vec![],
signer_public_key: None,
bootstrap: Bootstrap {
dev: bootstrap_dev,
seed: None,
},
}
}
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Auth {
pub name: String,
#[serde(alias = "sshKey")]
@@ -46,18 +71,21 @@ pub struct Auth {
pub password: Option<String>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Network {
pub name: String,
pub interfaces: Vec<NetworkInterface>,
pub script: String,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct NetworkInterface {
pub var: String,
pub n: i16,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub regexps: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub udev: Option<UdevFilter>,
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
@@ -74,7 +102,7 @@ impl Default for SSHServer {
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct LvmVG {
#[serde(alias = "vg")]
pub name: String,
@@ -86,7 +114,7 @@ pub struct LvmVG {
pub lvs: Vec<LvmLV>,
}
#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct LvmLVDefaults {
#[serde(default)]
pub fs: Filesystem,
@@ -94,9 +122,10 @@ pub struct LvmLVDefaults {
pub raid: Raid,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Default, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Filesystem {
#[default]
Ext4,
Xfs,
Btrfs,
@@ -115,13 +144,7 @@ impl Filesystem {
}
}
impl Default for Filesystem {
fn default() -> Self {
Filesystem::Ext4
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct LvmLV {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
@@ -132,61 +155,98 @@ pub struct LvmLV {
pub size: LvSize,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub enum LvSize {
Size(String),
Extents(String),
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct LvmPV {
pub n: i16,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub regexps: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub udev: Option<UdevFilter>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct CryptDev {
pub name: String,
#[serde(flatten)]
pub filter: DevFilter,
// we hit the limits of serde's enum representation here (flatten + variant casing)
#[serde(skip_serializing_if = "Option::is_none")]
pub dev: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub prefix: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub udev: Option<UdevFilter>,
#[serde(skip_serializing_if = "Option::is_none")]
pub optional: Option<bool>,
}
impl CryptDev {
pub fn filter(&self) -> DevFilter<'_> {
if let Some(dev) = self.dev.as_deref() {
DevFilter::Dev(dev)
} else if let Some(prefix) = self.prefix.as_deref() {
DevFilter::Prefix(prefix)
} else if let Some(udev) = self.udev.as_ref() {
DevFilter::Udev(udev)
} else {
DevFilter::None
}
}
pub fn optional(&self) -> bool {
self.optional.unwrap_or_else(|| self.filter.is_prefix())
self.optional.unwrap_or_else(|| match self.filter() {
DevFilter::None => true,
DevFilter::Dev(_) => false,
DevFilter::Prefix(_) => true,
DevFilter::Udev(_) => true,
})
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[test]
fn test_parse_crypt_dev() {
for s in [
"name: sys0\ndev: /dev/sda\n",
"name: crypt-\nprefix: /dev/sd\n",
"name: crypt-${name}\nudev: !glob [ DEVNAME, /dev/sd* ]\n",
] {
let dev: CryptDev = serde_yaml::from_str(s).unwrap();
dev.filter();
dev.optional();
}
}
pub enum DevFilter<'t> {
None,
Dev(&'t str),
Prefix(&'t str),
Udev(&'t UdevFilter),
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub enum DevFilter {
Dev(String),
Prefix(String),
}
impl DevFilter {
pub fn is_dev(&self) -> bool {
match self {
Self::Dev(_) => true,
_ => false,
}
}
pub fn is_prefix(&self) -> bool {
match self {
Self::Prefix(_) => true,
_ => false,
}
}
pub enum UdevFilter {
Has(String),
Eq(String, String),
Glob(String, String),
And(Vec<UdevFilter>),
Or(Vec<UdevFilter>),
Not(Box<UdevFilter>),
}
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct Raid {
pub mirrors: Option<u8>,
pub stripes: Option<u8>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Bootstrap {
pub dev: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub seed: Option<String>,
}

src/dls.rs

@@ -5,6 +5,7 @@ use reqwest::Method;
use std::collections::BTreeMap as Map;
use std::fmt::Display;
use std::net::IpAddr;
use std::time::Duration;
pub struct Client {
base_url: String,
@@ -33,7 +34,7 @@ impl Client {
self.get_json("clusters").await
}
pub fn cluster(&self, name: String) -> Cluster {
pub fn cluster(&self, name: String) -> Cluster<'_> {
Cluster { dls: self, name }
}
@@ -41,16 +42,30 @@ impl Client {
self.get_json("hosts").await
}
pub fn host(&self, name: String) -> Host {
pub fn host(&self, name: String) -> Host<'_> {
Host { dls: self, name }
}
pub async fn get_json<T: serde::de::DeserializeOwned>(&self, path: impl Display) -> Result<T> {
let req = self.get(&path)?.header("Accept", "application/json");
pub async fn sign_dl_set(&self, req: &DownloadSetReq) -> Result<String> {
let req = (self.req(Method::POST, "sign-download-set")?).json(req);
self.req_json(req).await
}
pub async fn fetch_dl_set(
&self,
signed_dlset: &str,
kind: &str,
name: &str,
asset: &str,
) -> Result<impl Stream<Item = reqwest::Result<Bytes>>> {
let req = self.get(format!(
"public/download-set/{kind}/{name}/{asset}?set={signed_dlset}"
))?;
let resp = do_req(req, &self.token).await?;
Ok(resp.bytes_stream())
}
let body = resp.bytes().await.map_err(Error::Read)?;
serde_json::from_slice(&body).map_err(Error::Parse)
pub async fn get_json<T: serde::de::DeserializeOwned>(&self, path: impl Display) -> Result<T> {
self.req_json(self.get(&path)?).await
}
pub async fn get_bytes(&self, path: impl Display) -> Result<Vec<u8>> {
let resp = do_req(self.get(&path)?, &self.token).await?;
@@ -60,6 +75,16 @@ impl Client {
self.req(Method::GET, path)
}
pub async fn req_json<T: serde::de::DeserializeOwned>(
&self,
req: reqwest::RequestBuilder,
) -> Result<T> {
let req = req.header("Accept", "application/json");
let resp = do_req(req, &self.token).await?;
let body = resp.bytes().await.map_err(Error::Read)?;
serde_json::from_slice(&body).map_err(Error::Parse)
}
pub fn req(&self, method: Method, path: impl Display) -> Result<reqwest::RequestBuilder> {
let uri = format!("{}/{path}", self.base_url);
@@ -143,22 +168,33 @@ pub struct ClusterConfig {
pub addons: String,
}
#[derive(serde::Deserialize, serde::Serialize)]
#[derive(Default, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct HostConfig {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub cluster_name: Option<String>,
pub annotations: Map<String, String>,
pub bootstrap_config: String,
#[serde(rename = "IPXE")]
pub ipxe: Option<String>,
#[serde(rename = "IPs")]
pub ips: Vec<IpAddr>,
#[serde(skip_serializing_if = "Map::is_empty")]
pub labels: Map<String, String>,
#[serde(skip_serializing_if = "Map::is_empty")]
pub annotations: Map<String, String>,
#[serde(rename = "IPXE", skip_serializing_if = "Option::is_none")]
pub ipxe: Option<String>,
pub initrd: String,
pub kernel: String,
pub labels: Map<String, String>,
pub versions: Map<String, String>,
pub bootstrap_config: String,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub initrd_files: Vec<crate::File>,
pub config: String,
}
#[derive(serde::Deserialize, serde::Serialize)]
@@ -184,6 +220,23 @@ pub struct KubeSignReq {
pub validity: Option<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct DownloadSetReq {
pub expiry: String,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub items: Vec<DownloadSetItem>,
}
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct DownloadSetItem {
pub kind: String,
pub name: String,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub assets: Vec<String>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct ServerError {
#[serde(default)]
@@ -253,3 +306,50 @@ pub enum Error {
#[error("response parsing failed: {0}")]
Parse(serde_json::Error),
}
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum File {
Static(crate::File),
Gen { path: String, from: ContentGen },
}
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ContentGen {
CaCrt(CaRef),
TlsKey(TlsRef),
TlsCrt {
key: TlsRef,
ca: CaRef,
profile: CertProfile,
},
}
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CaRef {
Global(String),
Cluster(String, String),
}
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TlsRef {
Cluster(String, String),
Host(String, String),
}
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CertProfile {
Client,
Server,
/// basically Client+Server
Peer,
Kube {
user: String,
group: String,
duration: Duration,
},
}
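These generated-file types are all externally tagged with snake_case variant names, so the wire shape follows directly from the derives. A sketch with hypothetical values ("m1", "server", "kube"), assuming the library is imported as dkl:

// Sketch: how the snake_case serde attributes above shape the JSON.
// All values here are hypothetical.
fn main() {
    let file = dkl::dls::File::Gen {
        path: "/etc/tls/server.crt".to_string(),
        from: dkl::dls::ContentGen::TlsCrt {
            key: dkl::dls::TlsRef::Host("m1".into(), "server".into()),
            ca: dkl::dls::CaRef::Global("kube".into()),
            profile: dkl::dls::CertProfile::Server,
        },
    };
    // -> {"gen":{"path":"/etc/tls/server.crt","from":{"tls_crt":{
    //      "key":{"host":["m1","server"]},"ca":{"global":"kube"},"profile":"server"}}}}
    println!("{}", serde_json::to_string(&file).unwrap());
}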

src/dynlay.rs (new file): 188 lines

@@ -0,0 +1,188 @@
use eyre::{format_err, Result};
use log::{debug, error, info, warn};
use std::path::PathBuf;
use tokio::{fs, io::AsyncWriteExt, process::Command};
use crate::fs::spawn_walk_dir;
pub struct Dynlay<'t> {
pub url_prefix: &'t str,
pub layers_dir: &'t str,
pub chroot: PathBuf,
}
impl<'t> Dynlay<'t> {
pub async fn install(&self, layer: &str, version: &str) -> Result<()> {
let lay_dir = &format!("{base}/{layer}", base = self.layers_dir);
debug!("mkdir -p {lay_dir}");
fs::create_dir_all(lay_dir).await?;
let lay_path = &format!("{lay_dir}/{version}");
if !fs::try_exists(lay_path).await? {
let part_file = &format!("{lay_dir}/{version}.tmp");
self.fetch(layer, version, part_file).await?;
(fs::rename(part_file, lay_path).await)
.map_err(|e| format_err!("failed mv {part_file} {lay_path}: {e}"))?;
}
let mount_path = PathBuf::from(lay_dir).join("mounts").join(layer);
(fs::create_dir_all(&mount_path).await)
.map_err(|e| format_err!("mkdir -p {mount_path:?} failed: {e}"))?;
let mount_path = &fs::canonicalize(mount_path).await?;
let mount_path_str = &mount_path.to_string_lossy().into_owned();
if is_mount_point(mount_path_str).await? {
info!("clearing previous mount");
let mut paths = spawn_walk_dir(mount_path.clone());
while let Some(result) = paths.recv().await {
let Ok((path, md)) = result else {
continue;
};
if !md.is_dir() {
let path = self.chroot.join(&path);
debug!("rm {path:?}");
if let Err(e) = fs::remove_file(&path).await {
warn!("rm {path:?} failed: {e}");
}
}
}
sudo("umount", &[mount_path]).await?;
}
// mount layer
info!("mounting layer");
sudo("mount", &["-t", "squashfs", lay_path, &mount_path_str]).await?;
let mut paths = spawn_walk_dir(mount_path.clone());
while let Some(result) = paths.recv().await {
let Ok((path, md)) = result else {
continue;
};
let target = self.chroot.join(&path);
if md.is_dir() {
debug!("mkdir -p {target:?}");
if let Err(e) = fs::create_dir_all(&target).await {
error!("mkdir -p {target:?} failed: {e}");
}
} else {
let _ = fs::remove_file(&target).await;
let source = mount_path.join(&path);
debug!("ln -s {source:?} {target:?}");
if let Err(e) = fs::symlink(&source, &target).await {
error!("ln -s {source:?} {target:?} failed: {e}");
}
}
}
Ok(())
}
async fn fetch(&self, layer: &str, version: &str, part_file: &str) -> Result<()> {
let url = &format!("{}/{layer}/{version}", self.url_prefix);
info!("fetching {url}");
let mut out = (fs::File::create(part_file).await)
.map_err(|e| format_err!("failed to open {part_file}: {e}"))?;
let resp = reqwest::get(url).await?;
if !resp.status().is_success() {
return Err(format_err!("fetch failed: {}", resp.status()));
}
let sha1 = (resp.headers().get("x-content-sha1"))
.ok_or(format_err!("no content hash in response"))?;
let sha1 = (sha1.to_str()).map_err(|e| format_err!("invalid sha1: {e}"))?;
debug!("content sha1: {sha1}");
let mut exp_sha1 = [0; 20];
hex::decode_to_slice(sha1, &mut exp_sha1).map_err(|e| format_err!("invalid sha1: {e}"))?;
let mut hash = openssl::sha::Sha1::new();
use futures::StreamExt;
let mut stream = resp.bytes_stream();
while let Some(bytes) = stream.next().await {
let bytes = bytes.map_err(|e| format_err!("remote read error: {e}"))?;
hash.update(&bytes);
(out.write_all(&bytes).await).map_err(|e| format_err!("local write error: {e}"))?;
}
(out.flush().await).map_err(|e| format_err!("local write error: {e}"))?;
drop(out);
let dl_sha1 = hash.finish();
if dl_sha1 != exp_sha1 {
if let Err(e) = fs::remove_file(part_file).await {
error!("failed to remove {part_file}: {e}");
}
return Err(format_err!(
"invalid content hash: expected {exp}, got {got}",
exp = hex::encode(exp_sha1),
got = hex::encode(dl_sha1)
));
}
Ok(())
}
}
async fn sudo<I, S>(program: &str, args: I) -> Result<()>
where
I: IntoIterator<Item = S>,
S: AsRef<std::ffi::OsStr>,
{
let mut cmd = if nix::unistd::geteuid().is_root() {
let mut cmd = Command::new(program);
cmd.args(args);
cmd
} else {
let mut cmd = Command::new("sudo");
cmd.arg(program).args(args);
cmd
};
let status = cmd.status().await?;
if status.success() {
Ok(())
} else {
Err(format_err!("{program} failed: {status}"))
}
}
async fn is_mount_point(target: &str) -> Result<bool> {
for line in fs::read_to_string("/proc/self/mounts").await?.lines() {
let line = line.trim_ascii();
if line.is_empty() || line.starts_with("#") {
continue;
}
let split: Vec<_> = line.split_ascii_whitespace().collect();
if split.len() < 6 {
continue;
}
// let dev = split[0];
let mount_point = split[1];
// let fstype = split[2];
// let mntops = split[3];
// let fs_freq = split[4];
// let fsck_passno = split[5];
if mount_point == target {
return Ok(true);
}
}
Ok(false)
}
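For reference, a minimal sketch of driving this installer directly from code rather than through the CLI; the layer and version are the ones used in the test script, and the other values are the CLI defaults:

// Sketch: direct use of the new Dynlay API (the dkl CLI wires this up
// from the `dynlay` subcommand's arguments).
#[tokio::main(flavor = "current_thread")]
async fn main() -> eyre::Result<()> {
    dkl::dynlay::Dynlay {
        url_prefix: "https://dkl.novit.io/dist/layers",
        layers_dir: "/opt/dynlay",
        chroot: "/".into(),
    }
    .install("kubernetes", "v1.33.3_containerd.2.0.5")
    .await
}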

src/fs.rs (new file): 60 lines

@@ -0,0 +1,60 @@
use eyre::Result;
use std::fs::Metadata;
use std::path::PathBuf;
use tokio::fs::read_dir;
use tokio::sync::mpsc;
pub fn spawn_walk_dir(
dir: impl Into<PathBuf> + Send + 'static,
) -> mpsc::Receiver<Result<(PathBuf, Metadata)>> {
let (tx, rx) = mpsc::channel(1);
tokio::spawn(walk_dir(dir, tx));
rx
}
pub async fn walk_dir(dir: impl Into<PathBuf>, tx: mpsc::Sender<Result<(PathBuf, Metadata)>>) {
let dir: PathBuf = dir.into();
let mut todo = std::collections::LinkedList::new();
if let Ok(rd) = read_dir(&dir).await {
todo.push_front(rd);
}
while let Some(rd) = todo.front_mut() {
let entry = match rd.next_entry().await {
Ok(v) => v,
Err(e) => {
if tx.send(Err(e.into())).await.is_err() {
return;
}
todo.pop_front(); // skip dir on error
continue;
}
};
let Some(entry) = entry else {
todo.pop_front();
continue;
};
let Ok(md) = entry.metadata().await else {
continue;
};
let is_dir = md.is_dir();
let Ok(path) = entry.path().strip_prefix(&dir).map(|p| p.to_path_buf()) else {
continue; // sub-entry not under dir; weird, but we ignore it
};
if tx.send(Ok((path, md))).await.is_err() {
return;
}
// recurse into subdirectories
if is_dir {
if let Ok(rd) = read_dir(entry.path()).await {
todo.push_front(rd);
}
}
}
}
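A sketch of consuming this walker; note that walk_dir yields paths relative to the walked root, which is what lets dynlay re-join them under both the mount point and the chroot. The directory walked here is hypothetical:

// Sketch: consuming spawn_walk_dir's channel of (relative path, metadata).
#[tokio::main(flavor = "current_thread")]
async fn main() {
    let mut entries = dkl::fs::spawn_walk_dir("/opt/dynlay/mounts/kubernetes");
    while let Some(entry) = entries.recv().await {
        match entry {
            Ok((path, md)) => println!("{} (dir: {})", path.display(), md.is_dir()),
            Err(e) => eprintln!("walk error: {e}"),
        }
    }
}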

src/lib.rs

@@ -1,9 +1,11 @@
pub mod apply;
pub mod bootstrap;
pub mod dls;
pub mod dynlay;
pub mod fs;
pub mod logger;
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct Config {
pub layers: Vec<String>,
pub root_user: RootUser,
@@ -17,18 +19,20 @@ pub struct Config {
pub users: Vec<User>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct RootUser {
#[serde(skip_serializing_if = "Option::is_none")]
pub password_hash: Option<String>,
pub authorized_keys: Vec<String>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct Mount {
pub r#type: Option<String>,
pub dev: String,
pub path: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub options: Option<String>,
}

src/logger.rs

@@ -35,7 +35,7 @@ impl<'t> Logger<'t> {
let archives_read_dir = (fs::read_dir(archives_path).await)
.map_err(|e| format_err!("failed to list archives: {e}"))?;
let mut prev_stamp = ts_trunc(Utc::now());
let mut prev_stamp = trunc_ts(Utc::now());
let mut current_log = BufWriter::new(self.open_log(prev_stamp).await?);
tokio::spawn(compress_archives(
@@ -94,7 +94,7 @@ impl<'t> Logger<'t> {
prev_stamp: &mut Timestamp,
out: &mut BufWriter<File>,
) -> Result<()> {
let trunc_ts = ts_trunc(log.ts);
let trunc_ts = trunc_ts(log.ts);
if *prev_stamp < trunc_ts {
// switch log
out.flush().await?;
@@ -193,7 +193,7 @@ async fn copy(
}
}
fn ts_trunc(ts: Timestamp) -> Timestamp {
pub fn trunc_ts(ts: Timestamp) -> Timestamp {
ts.duration_trunc(TRUNC_DELTA)
.expect("duration_trunc failed")
}
@@ -243,7 +243,7 @@ async fn compress(path: impl AsRef<Path>) {
.await
.map_err(|e| format_err!("open {path_str} failed: {e}"))?;
let out_path = path.with_extension("zstd");
let out_path = path.with_extension("zst");
let out = (File::create(&out_path).await) // create output
.map_err(|e| format_err!("create {} failed: {e}", out_path.to_string_lossy()))?;
@@ -285,14 +285,16 @@ pub async fn log_files(log_path: &str, log_name: &str) -> std::io::Result<Vec<Lo
continue;
};
let (name, compressed) = file_name
.strip_suffix(".zstd")
.map_or((file_name, false), |s| (s, true));
let Some(name) = name.strip_suffix(".log") else {
let Some((name, ext)) = file_name.rsplit_once('.') else {
continue;
};
let compressed = match ext {
"zst" => true,
"log" => false,
_ => continue,
};
let Some((name, timestamp)) = name.rsplit_once('.') else {
continue;
};
@@ -331,12 +333,12 @@ impl PartialOrd for LogFile {
impl LogFile {
pub async fn copy_to(&self, out: &mut (impl AsyncWrite + Unpin)) -> io::Result<u64> {
let mut input = File::open(&self.path).await?;
let input = &mut File::open(&self.path).await?;
if self.compressed {
let mut out = ZstdDecoder::new(out);
tokio::io::copy(&mut input, &mut out).await
let out = &mut ZstdDecoder::new(out);
tokio::io::copy(input, out).await
} else {
tokio::io::copy(&mut input, out).await
tokio::io::copy(input, out).await
}
}
}
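The renaming from .zstd to .zst and the rsplit_once parsing above imply archive names of the form <log_name>.<timestamp>.log (live) or <log_name>.<timestamp>.zst (compressed). A standalone sketch of that parse, with hypothetical file names:

// Sketch of the file-name parse used by log_files above:
//   <log_name>.<timestamp>.log  -> live log
//   <log_name>.<timestamp>.zst  -> zstd-compressed archive
fn parse_log_file_name(file_name: &str) -> Option<(&str, &str, bool)> {
    let (name, ext) = file_name.rsplit_once('.')?;
    let compressed = match ext {
        "zst" => true,
        "log" => false,
        _ => return None,
    };
    let (log_name, timestamp) = name.rsplit_once('.')?;
    Some((log_name, timestamp, compressed))
}

#[test]
fn parses_archive_names() {
    // timestamp format as used by the test script; file names are hypothetical
    assert_eq!(
        parse_log_file_name("bash.20250720_12.zst"),
        Some(("bash", "20250720_12", true))
    );
    assert_eq!(parse_log_file_name("bash.tmp"), None);
}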


@@ -3,7 +3,7 @@ set -ex
dkl=target/debug/dkl
test=${1:-log}
test=${1:-dynlay}
export RUST_LOG=debug
@@ -19,8 +19,20 @@ case $test in
cat tmp/log/bash.log
;;
log)
$dkl log --log-path tmp/log bash 20250720_12 20250720_16
log-ls)
$dkl log --log-path tmp/log bash ls -l
;;
log-cat)
$dkl log --log-path tmp/log bash cat 20250720_12 20301231_23
;;
log-clean)
$dkl log --log-path tmp/log bash ls -l
$dkl log --log-path tmp/log bash cleanup 0
;;
dynlay)
mkdir -p tmp/system
$dkl dynlay --layers-dir tmp/dynlay --chroot tmp/system kubernetes v1.33.3_containerd.2.0.5
;;
*) echo 1>&2 "unknown test: $test"; exit 1 ;;


@@ -18,3 +18,12 @@ $dls cluster cluster ssh-sign ~/.ssh/id_ed25519.pub
$dls host m1 | jq '{Name, ClusterName, IPs}'
$dls host m1 bootstrap-config
export DLS_DLSET=$($dls dl-set sign --expiry 1d \
cluster:cluster:addons \
host:m1:kernel:initrd:bootstrap.tar \
host:m2:config:bootstrap-config:boot.vmdk)
$dls dl-set show
$dls dl-set fetch host m2 bootstrap-config
rm host_m2_bootstrap-config