Compare commits

18 commits. Head: b1bf8f3fb8 ("wip").

| SHA1 |
|---|
| 93f3af0ba8 |
| c4ed68d0e9 |
| 3fe6fc9222 |
| d7dfea2dec |
| a3d3ccfd25 |
| 9cef7a773e |
| 4ecee15b6b |
| 5e9f3e64d8 |
| 3cc2111ca7 |
| 1e047afac3 |
| 6f059287ec |
| bbea9b9c00 |
| be5db231d9 |
| b01c41b856 |
| 4ccda5039b |
| 852738bec3 |
| 7d02d8f932 |
| 52c23653ac |
.gitignore (vendored, 8 lines changed)

```diff
@@ -1,8 +1,12 @@
 /target
 /dls
 /modd.conf
-/m1_bootstrap-config
-/config.yaml
 /dist
 /dkl
 /tmp
+
+/config.yaml
+
+# dls assets
+/cluster_*_*
+/host_*_*
```
Cargo.lock (generated, 835 lines changed)

File diff suppressed because it is too large.
Cargo.toml (10 lines changed)

```diff
@@ -12,17 +12,23 @@ codegen-units = 1

 [dependencies]
 async-compression = { version = "0.4.27", features = ["tokio", "zstd"] }
+base32 = "0.5.1"
 bytes = "1.10.1"
-chrono = { version = "0.4.41", default-features = false, features = ["now"] }
+chrono = { version = "0.4.41", default-features = false, features = ["clock", "now"] }
 clap = { version = "4.5.40", features = ["derive", "env"] }
 clap_complete = { version = "4.5.54", features = ["unstable-dynamic"] }
 env_logger = "0.11.8"
 eyre = "0.6.12"
+futures = "0.3.31"
 futures-util = "0.3.31"
 glob = "0.3.2"
+hex = "0.4.3"
 log = "0.4.27"
+lz4 = "1.28.1"
+nix = { version = "0.30.1", features = ["user"] }
+openssl = "0.10.73"
 page_size = "0.6.0"
-reqwest = { version = "0.12.20", features = ["json", "stream"] }
+reqwest = { version = "0.12.20", features = ["json", "stream", "native-tls"] }
 serde = { version = "1.0.219", features = ["derive"] }
 serde_json = "1.0.140"
 serde_yaml = "0.9.34"
```
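Note: the chrono feature change lines up with the new `dkl log cleanup` path, which does wall-clock arithmetic. A minimal sketch of the calls that motivate it, assuming chrono 0.4's feature gating (with `default-features = false`, `Utc::now()` is behind the "now"/"clock" features); verify against the pinned 0.4.41:

```rust
// Sketch only: the kind of clock math `dkl log cleanup` performs.
fn main() {
    let now = chrono::Utc::now(); // needs chrono's "now" (or "clock") feature
    let deadline = now - chrono::Days::new(7);
    println!("now={now} deadline={deadline}");
}
```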
src/bin/dkl.rs (147 lines changed)

```diff
@@ -24,21 +24,46 @@ enum Command {
         prefix: String,
     },
     Logger {
-        #[arg(long, short = 'p', default_value = "/var/log")]
+        /// Path where the logs are stored
+        #[arg(long, short = 'p', default_value = "/var/log", env = "DKL_LOG_PATH")]
         log_path: String,
+        /// Name of the log instead of the command's basename
         #[arg(long, short = 'n')]
         log_name: Option<String>,
+        /// prefix log lines with time & stream
         #[arg(long)]
         with_prefix: bool,
         command: String,
         args: Vec<String>,
     },
     Log {
-        #[arg(long, short = 'p', default_value = "/var/log")]
+        /// Path where the logs are stored
+        #[arg(long, short = 'p', default_value = "/var/log", env = "DKL_LOG_PATH")]
         log_path: String,
+        /// Name of the log set to operate on.
         log_name: String,
-        since: Option<String>,
-        until: Option<String>,
+        #[command(subcommand)]
+        op: LogOp,
+    },
+    Dynlay {
+        layer: String,
+        version: String,
+        #[arg(
+            long,
+            short = 'u',
+            default_value = "https://dkl.novit.io/dist/layers",
+            env = "DKL_DYNLAY_URL"
+        )]
+        url_prefix: String,
+        #[arg(
+            long,
+            short = 'd',
+            default_value = "/opt/dynlay",
+            env = "DKL_DYNLAY_DIR"
+        )]
+        layers_dir: String,
+        #[arg(long, default_value = "/")]
+        chroot: std::path::PathBuf,
     },
 }

@@ -84,15 +109,92 @@ async fn main() -> Result<()> {
         C::Log {
             log_path,
             log_name,
-            since,
-            until,
+            op,
+        } => op.run(&log_path, &log_name).await,
+        C::Dynlay {
+            ref layer,
+            ref version,
+            ref url_prefix,
+            ref layers_dir,
+            chroot,
         } => {
-            let since = parse_ts_arg(since)?;
-            let until = parse_ts_arg(until)?;
+            dkl::dynlay::Dynlay {
+                url_prefix,
+                layers_dir,
+                chroot,
+            }
+            .install(layer, version)
+            .await
+        }
+    }
+}
+
+async fn apply_config(config_file: &str, filters: &[glob::Pattern], chroot: &str) -> Result<()> {
+    let config = fs::read_to_string(config_file).await?;
+    let config: dkl::Config = serde_yaml::from_str(&config)?;
+
+    let files = if filters.is_empty() {
+        config.files
+    } else {
+        (config.files.into_iter())
+            .filter(|f| filters.iter().any(|filter| filter.matches(&f.path)))
+            .collect()
+    };
+
+    dkl::apply::files(&files, chroot).await
+}
+
+#[derive(Subcommand)]
+enum LogOp {
+    Ls {
+        #[arg(short = 'l', long)]
+        detail: bool,
+    },
+    Cleanup {
+        /// days of log to keep
+        days: u64,
+    },
+    Cat {
+        /// print logs >= since
+        since: Option<String>,
+        /// print logs <= until
+        until: Option<String>,
+    },
+}
+
+impl LogOp {
+    async fn run(self, log_path: &str, log_name: &str) -> Result<()> {
         let mut files = dkl::logger::log_files(&log_path, &log_name).await?;
         files.sort();
+
+        use LogOp as Op;
+        match self {
+            Op::Ls { detail } => {
+                for f in files {
+                    let path = f.path.to_string_lossy();
+                    if detail {
+                        println!("{ts} {path}", ts = f.timestamp);
+                    } else {
+                        println!("{path}");
+                    }
+                }
+            }
+            Op::Cleanup { days } => {
+                let deadline = chrono::Utc::now() - chrono::Days::new(days);
+                let deadline = dkl::logger::trunc_ts(deadline);
+                debug!("cleanup {log_name} logs < {deadline}");
+
+                for f in files {
+                    if f.timestamp < deadline {
+                        debug!("removing {}", f.path.to_string_lossy());
+                        fs::remove_file(f.path).await?;
+                    }
+                }
+            }
+            Op::Cat { since, until } => {
+                let since = parse_ts_arg(since)?;
+                let until = parse_ts_arg(until)?;

                 let mut out = tokio::io::stdout();

                 for f in files {
@@ -103,13 +205,21 @@ async fn main() -> Result<()> {
                         continue;
                     }

-            debug!("{f:?}");
-            f.copy_to(&mut out).await?;
+                    debug!(
+                        "cat {path} (timestamp={ts}, compressed={comp})",
+                        path = f.path.to_string_lossy(),
+                        ts = f.timestamp.to_rfc3339(),
+                        comp = f.compressed
+                    );
+                    if let Err(e) = f.copy_to(&mut out).await {
+                        error!("{file}: {e}", file = f.path.to_string_lossy());
+                    }
+                }
+            }
+        }
         }
         Ok(())
     }
 }
-}

 fn parse_ts_arg(ts: Option<String>) -> Result<Option<dkl::logger::Timestamp>> {
     match ts {
@@ -126,21 +236,6 @@ fn basename(path: &str) -> &str {
     path.rsplit_once('/').map_or(path, |split| split.1)
 }

-async fn apply_config(config_file: &str, filters: &[glob::Pattern], chroot: &str) -> Result<()> {
-    let config = fs::read_to_string(config_file).await?;
-    let config: dkl::Config = serde_yaml::from_str(&config)?;
-
-    let files = if filters.is_empty() {
-        config.files
-    } else {
-        (config.files.into_iter())
-            .filter(|f| filters.iter().any(|filter| filter.matches(&f.path)))
-            .collect()
-    };
-
-    dkl::apply::files(&files, chroot).await
-}
-
 fn parse_globs(filters: &[String]) -> Result<Vec<glob::Pattern>> {
     let mut errors = false;
     let filters = (filters.iter())
```
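Note on the `Log` refactor: the flat `since`/`until` arguments move into a nested `LogOp` subcommand via clap's `#[command(subcommand)]`. A self-contained sketch of the same pattern (the names `Demo` and `Action` are illustrative, not from this diff):

```rust
// Nested-subcommand sketch: a positional arg followed by a subcommand,
// mirroring `Log { log_path, log_name, op: LogOp }` above.
use clap::{Parser, Subcommand};

#[derive(Parser)]
struct Demo {
    #[arg(long, default_value = "/var/log")]
    log_path: String,
    log_name: String,
    #[command(subcommand)]
    op: Action,
}

#[derive(Subcommand)]
enum Action {
    Ls {
        #[arg(short = 'l', long)]
        detail: bool,
    },
    Cleanup { days: u64 },
}

fn main() {
    // equivalent of `demo --log-path tmp/log bash cleanup 7`
    let demo = Demo::parse_from(["demo", "--log-path", "tmp/log", "bash", "cleanup", "7"]);
    match demo.op {
        Action::Ls { detail } => println!("ls detail={detail}"),
        Action::Cleanup { days } => println!("cleanup, keep {days} days"),
    }
}
```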
src/bin/dls.rs (171 lines changed)

```diff
@@ -1,14 +1,18 @@
+use bytes::Bytes;
 use clap::{CommandFactory, Parser, Subcommand};
-use eyre::{Result, format_err};
+use eyre::format_err;
+use futures_util::Stream;
 use futures_util::StreamExt;
-use tokio::io::AsyncWriteExt;
+use std::time::{Duration, SystemTime};
+use tokio::fs;
+use tokio::io::{AsyncWrite, AsyncWriteExt};

 use dkl::dls;

 #[derive(Parser)]
 #[command()]
 struct Cli {
-    #[arg(long, default_value = "http://[::1]:7606")]
+    #[arg(long, default_value = "http://[::1]:7606", env = "DLS_URL")]
     dls: String,

     #[command(subcommand)]
@@ -25,9 +29,36 @@ enum Command {
     },
     Hosts,
     Host {
+        #[arg(short = 'o', long)]
+        out: Option<String>,
         host: String,
         asset: Option<String>,
     },
+    #[command(subcommand)]
+    DlSet(DlSet),
+}
+
+#[derive(Subcommand)]
+enum DlSet {
+    Sign {
+        #[arg(short = 'e', long, default_value = "1d")]
+        expiry: String,
+        #[arg(value_parser = parse_download_set_item)]
+        items: Vec<dls::DownloadSetItem>,
+    },
+    Show {
+        #[arg(env = "DLS_DLSET")]
+        signed_set: String,
+    },
+    Fetch {
+        #[arg(long, env = "DLS_DLSET")]
+        signed_set: String,
+        #[arg(short = 'o', long)]
+        out: Option<String>,
+        kind: String,
+        name: String,
+        asset: String,
+    },
 }

 #[derive(Subcommand)]
@@ -62,7 +93,7 @@ enum ClusterCommand {
 }

 #[tokio::main(flavor = "current_thread")]
-async fn main() -> Result<()> {
+async fn main() -> eyre::Result<()> {
     clap_complete::CompleteEnv::with_factory(Cli::command).complete();

     let cli = Cli::parse();
@@ -125,44 +156,110 @@ async fn main() -> Result<()> {
             }
         }
         C::Hosts => write_json(&dls.hosts().await?),
-        C::Host { host, asset } => {
+        C::Host { out, host, asset } => {
             let host_name = host.clone();
             let host = dls.host(host);
             match asset {
                 None => write_json(&host.config().await?),
                 Some(asset) => {
-                    let mut stream = host.asset(&asset).await?;
-
-                    let out_path = format!("{host_name}_{asset}");
-                    eprintln!("writing {host_name} asset {asset} to {out_path}");
-
-                    let out = tokio::fs::File::options()
-                        .mode(0o600)
-                        .write(true)
-                        .create(true)
-                        .truncate(true)
-                        .open(out_path)
-                        .await?;
-                    let mut out = tokio::io::BufWriter::new(out);
-
-                    let mut n = 0u64;
-                    while let Some(chunk) = stream.next().await {
-                        let chunk = chunk?;
-                        n += chunk.len() as u64;
-                        eprint!("wrote {n} bytes\r");
-                        out.write_all(&chunk).await?;
-                    }
-                    eprintln!();
-
-                    out.flush().await?;
+                    let stream = host.asset(&asset).await?;
+                    let mut out = create_asset_file(out, "host", &host_name, &asset).await?;
+                    copy_stream(stream, &mut out).await?;
                 }
             }
         }
+        C::DlSet(set) => match set {
+            DlSet::Sign { expiry, items } => {
+                let req = dls::DownloadSetReq { expiry, items };
+                let signed = dls.sign_dl_set(&req).await?;
+                println!("{signed}");
+            }
+            DlSet::Show { signed_set } => {
+                let raw = base32::decode(base32::Alphabet::Rfc4648 { padding: false }, &signed_set)
+                    .ok_or(format_err!("invalid dlset"))?;
+
+                let sig_len = raw[0] as usize;
+                let (sig, data) = raw[1..].split_at(sig_len);
+                println!("signature: {}...", hex::encode(&sig[..16]));
+
+                let data = lz4::Decoder::new(data)?;
+                let data = std::io::read_to_string(data)?;
+
+                let (expiry, items) = data.split_once('|').ok_or(format_err!("invalid dlset"))?;
+                let expiry = i64::from_str_radix(expiry, 16)?;
+                let expiry = chrono::DateTime::from_timestamp(expiry, 0).unwrap();
+
+                println!("expires on {expiry}");
+
+                for item in items.split('|') {
+                    let mut parts = item.split(':');
+                    let Some(kind) = parts.next() else {
+                        continue;
+                    };
+                    let Some(name) = parts.next() else {
+                        continue;
+                    };
+                    for asset in parts {
+                        println!("- {kind} {name} {asset}");
+                    }
+                }
+            }
+            DlSet::Fetch {
+                signed_set,
+                out,
+                kind,
+                name,
+                asset,
+            } => {
+                let stream = dls.fetch_dl_set(&signed_set, &kind, &name, &asset).await?;
+                let mut out = create_asset_file(out, &kind, &name, &asset).await?;
+                copy_stream(stream, &mut out).await?;
+            }
+        },
     };

     Ok(())
 }
+
+async fn create_asset_file(
+    path: Option<String>,
+    kind: &str,
+    name: &str,
+    asset: &str,
+) -> std::io::Result<fs::File> {
+    let path = &path.unwrap_or(format!("{kind}_{name}_{asset}"));
+    eprintln!("writing {kind} {name} asset {asset} to {path}");
+    (fs::File::options().write(true).create(true).truncate(true))
+        .mode(0o600)
+        .open(path)
+        .await
+}
+
+async fn copy_stream(
+    mut stream: impl Stream<Item = reqwest::Result<Bytes>> + Unpin,
+    out: &mut (impl AsyncWrite + Unpin),
+) -> std::io::Result<()> {
+    let mut out = tokio::io::BufWriter::new(out);
+
+    let info_delay = Duration::from_secs(1);
+    let mut ts = SystemTime::now();
+
+    let mut n = 0u64;
+    while let Some(chunk) = stream.next().await {
+        let chunk = chunk.map_err(|e| std::io::Error::other(e))?;
+        n += chunk.len() as u64;
+        out.write_all(&chunk).await?;
+
+        if ts.elapsed().is_ok_and(|t| t >= info_delay) {
+            eprint!("wrote {n} bytes\r");
+            ts = SystemTime::now();
+        }
+    }
+    eprintln!("wrote {n} bytes");
+
+    out.flush().await
+}

 fn write_json<T: serde::ser::Serialize>(v: &T) {
     let data = serde_json::to_string_pretty(v).expect("value should serialize to json");
     println!("{data}");
@@ -172,6 +269,20 @@ fn write_raw(raw: &[u8]) {
     use std::io::Write;

     let mut out = std::io::stdout();
-    out.write(raw).expect("stdout write");
+    out.write_all(raw).expect("stdout write");
     out.flush().expect("stdout flush");
 }
+
+fn parse_download_set_item(s: &str) -> Result<dls::DownloadSetItem, std::io::Error> {
+    let err = |s: &str| std::io::Error::other(s);
+
+    let mut parts = s.split(':');
+
+    let item = dls::DownloadSetItem {
+        kind: parts.next().ok_or(err("no kind"))?.to_string(),
+        name: parts.next().ok_or(err("no name"))?.to_string(),
+        assets: parts.map(|p| p.to_string()).collect(),
+    };
+
+    Ok(item)
+}
```
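The `dl-set show` branch doubles as documentation of the signed-set format: base32 (RFC 4648, unpadded) over a one-byte signature length, the signature bytes, then an lz4 stream whose plaintext is `<expiry-hex>|<kind>:<name>:<asset>[:<asset>...]|...`. A sketch of just that plaintext payload, as inferred from the decoder (signing itself is server-side and not shown in this diff; `encode_payload` is a hypothetical helper):

```rust
// Payload layout inferred from DlSet::Show above; not part of the diff.
fn encode_payload(expiry_unix: i64, items: &[(&str, &str, &[&str])]) -> String {
    let mut payload = format!("{expiry_unix:x}"); // expiry as hex seconds
    for (kind, name, assets) in items {
        payload.push('|');
        payload.push_str(kind);
        payload.push(':');
        payload.push_str(name);
        for asset in *assets {
            payload.push(':');
            payload.push_str(asset);
        }
    }
    payload
}

fn main() {
    let payload = encode_payload(1767139200, &[("host", "m1", &["kernel", "initrd"])]);
    println!("{payload}"); // 69546780|host:m1:kernel:initrd
}
```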
src/bootstrap.rs (148 lines changed)

```diff
@@ -2,7 +2,7 @@ use std::collections::BTreeMap as Map;

 pub const TAKE_ALL: i16 = -1;

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct Config {
     pub anti_phishing_code: String,

@@ -14,20 +14,22 @@ pub struct Config {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub resolv_conf: Option<String>,

-    #[serde(default)]
+    #[serde(default, skip_serializing_if = "Map::is_empty")]
     pub vpns: Map<String, String>,

+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub networks: Vec<Network>,

+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub auths: Vec<Auth>,
     #[serde(default)]
     pub ssh: SSHServer,

-    #[serde(default)]
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub pre_lvm_crypt: Vec<CryptDev>,
-    #[serde(default)]
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub lvm: Vec<LvmVG>,
-    #[serde(default)]
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub crypt: Vec<CryptDev>,

     #[serde(skip_serializing_if = "Option::is_none")]
@@ -36,7 +38,30 @@ pub struct Config {
     pub bootstrap: Bootstrap,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+impl Config {
+    pub fn new(bootstrap_dev: String) -> Self {
+        Self {
+            anti_phishing_code: "Direktil<3".into(),
+            keymap: None,
+            modules: None,
+            resolv_conf: None,
+            vpns: Map::new(),
+            networks: vec![],
+            auths: vec![],
+            ssh: Default::default(),
+            pre_lvm_crypt: vec![],
+            lvm: vec![],
+            crypt: vec![],
+            signer_public_key: None,
+            bootstrap: Bootstrap {
+                dev: bootstrap_dev,
+                seed: None,
+            },
+        }
+    }
+}
+
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct Auth {
     pub name: String,
     #[serde(alias = "sshKey")]
@@ -46,18 +71,21 @@ pub struct Auth {
     pub password: Option<String>,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct Network {
     pub name: String,
     pub interfaces: Vec<NetworkInterface>,
     pub script: String,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct NetworkInterface {
     pub var: String,
     pub n: i16,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub regexps: Vec<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub udev: Option<UdevFilter>,
 }

 #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
@@ -74,7 +102,7 @@ impl Default for SSHServer {
     }
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct LvmVG {
     #[serde(alias = "vg")]
     pub name: String,
@@ -86,7 +114,7 @@ pub struct LvmVG {
     pub lvs: Vec<LvmLV>,
 }

-#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
 pub struct LvmLVDefaults {
     #[serde(default)]
     pub fs: Filesystem,
@@ -94,9 +122,10 @@ pub struct LvmLVDefaults {
     pub raid: Raid,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Default, Debug, serde::Deserialize, serde::Serialize)]
 #[serde(rename_all = "snake_case")]
 pub enum Filesystem {
+    #[default]
     Ext4,
     Xfs,
     Btrfs,
@@ -115,13 +144,7 @@ impl Filesystem {
     }
 }

-impl Default for Filesystem {
-    fn default() -> Self {
-        Filesystem::Ext4
-    }
-}
-
-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct LvmLV {
     pub name: String,
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -132,61 +155,98 @@ pub struct LvmLV {
     pub size: LvSize,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 #[serde(rename_all = "snake_case")]
 pub enum LvSize {
     Size(String),
     Extents(String),
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct LvmPV {
     pub n: i16,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub regexps: Vec<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub udev: Option<UdevFilter>,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct CryptDev {
     pub name: String,
-    #[serde(flatten)]
-    pub filter: DevFilter,
+    // hit the limit of enum representation here (flatten + enum variant case)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dev: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub prefix: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub udev: Option<UdevFilter>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub optional: Option<bool>,
 }
 impl CryptDev {
+    pub fn filter(&self) -> DevFilter<'_> {
+        if let Some(dev) = self.dev.as_deref() {
+            DevFilter::Dev(dev)
+        } else if let Some(prefix) = self.prefix.as_deref() {
+            DevFilter::Prefix(prefix)
+        } else if let Some(udev) = self.udev.as_ref() {
+            DevFilter::Udev(udev)
+        } else {
+            DevFilter::None
+        }
+    }
+
     pub fn optional(&self) -> bool {
-        self.optional.unwrap_or_else(|| self.filter.is_prefix())
+        self.optional.unwrap_or_else(|| match self.filter() {
+            DevFilter::None => true,
+            DevFilter::Dev(_) => false,
+            DevFilter::Prefix(_) => true,
+            DevFilter::Udev(_) => true,
+        })
     }
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[test]
+fn test_parse_crypt_dev() {
+    for s in [
+        "name: sys0\ndev: /dev/sda\n",
+        "name: crypt-\nprefix: /dev/sd\n",
+        "name: crypt-${name}\nudev: !glob [ DEVNAME, /dev/sd* ]\n",
+    ] {
+        let dev: CryptDev = serde_yaml::from_str(s).unwrap();
+        dev.filter();
+        dev.optional();
+    }
+}
+
+pub enum DevFilter<'t> {
+    None,
+    Dev(&'t str),
+    Prefix(&'t str),
+    Udev(&'t UdevFilter),
+}
+
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 #[serde(rename_all = "snake_case")]
-pub enum DevFilter {
-    Dev(String),
-    Prefix(String),
-}
-impl DevFilter {
-    pub fn is_dev(&self) -> bool {
-        match self {
-            Self::Dev(_) => true,
-            _ => false,
-        }
-    }
-    pub fn is_prefix(&self) -> bool {
-        match self {
-            Self::Prefix(_) => true,
-            _ => false,
-        }
-    }
+pub enum UdevFilter {
+    Has(String),
+    Eq(String, String),
+    Glob(String, String),
+    And(Vec<UdevFilter>),
+    Or(Vec<UdevFilter>),
+    Not(Box<UdevFilter>),
 }

-#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
 pub struct Raid {
     pub mirrors: Option<u8>,
     pub stripes: Option<u8>,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct Bootstrap {
     pub dev: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub seed: Option<String>,
 }
```
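The `CryptDev` rework (see the comment about serde's flatten-plus-enum limit) replaces the flattened `DevFilter` field with plain optional fields and a borrowed `DevFilter` view. The YAML forms exercised by `test_parse_crypt_dev` rely on serde_yaml 0.9's `!tag` enum syntax; a standalone round-trip sketch, with the enum copied from the diff and derives trimmed to what the sketch needs:

```rust
// Assumes serde and serde_yaml 0.9 as in Cargo.toml above.
#[derive(Debug, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
enum UdevFilter {
    Has(String),
    Eq(String, String),
    Glob(String, String),
    And(Vec<UdevFilter>),
    Or(Vec<UdevFilter>),
    Not(Box<UdevFilter>),
}

fn main() {
    // the form used in test_parse_crypt_dev
    let f: UdevFilter = serde_yaml::from_str("!glob [ DEVNAME, /dev/sd* ]").unwrap();
    println!("{f:?}"); // Glob("DEVNAME", "/dev/sd*")

    let f: UdevFilter = serde_yaml::from_str("!has ID_CDROM").unwrap();
    println!("{f:?}"); // Has("ID_CDROM")
}
```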
src/dls.rs (124 lines changed)

```diff
@@ -5,6 +5,7 @@ use reqwest::Method;
 use std::collections::BTreeMap as Map;
 use std::fmt::Display;
 use std::net::IpAddr;
+use std::time::Duration;

 pub struct Client {
     base_url: String,
@@ -33,7 +34,7 @@ impl Client {
         self.get_json("clusters").await
     }

-    pub fn cluster(&self, name: String) -> Cluster {
+    pub fn cluster(&self, name: String) -> Cluster<'_> {
         Cluster { dls: self, name }
     }

@@ -41,16 +42,30 @@ impl Client {
         self.get_json("hosts").await
     }

-    pub fn host(&self, name: String) -> Host {
+    pub fn host(&self, name: String) -> Host<'_> {
         Host { dls: self, name }
     }

-    pub async fn get_json<T: serde::de::DeserializeOwned>(&self, path: impl Display) -> Result<T> {
-        let req = self.get(&path)?.header("Accept", "application/json");
+    pub async fn sign_dl_set(&self, req: &DownloadSetReq) -> Result<String> {
+        let req = (self.req(Method::POST, "sign-download-set")?).json(req);
+        self.req_json(req).await
+    }
+    pub async fn fetch_dl_set(
+        &self,
+        signed_dlset: &str,
+        kind: &str,
+        name: &str,
+        asset: &str,
+    ) -> Result<impl Stream<Item = reqwest::Result<Bytes>>> {
+        let req = self.get(format!(
+            "public/download-set/{kind}/{name}/{asset}?set={signed_dlset}"
+        ))?;
         let resp = do_req(req, &self.token).await?;
+        Ok(resp.bytes_stream())
+    }

-        let body = resp.bytes().await.map_err(Error::Read)?;
-        serde_json::from_slice(&body).map_err(Error::Parse)
+    pub async fn get_json<T: serde::de::DeserializeOwned>(&self, path: impl Display) -> Result<T> {
+        self.req_json(self.get(&path)?).await
     }
     pub async fn get_bytes(&self, path: impl Display) -> Result<Vec<u8>> {
         let resp = do_req(self.get(&path)?, &self.token).await?;
@@ -60,6 +75,16 @@ impl Client {
         self.req(Method::GET, path)
     }

+    pub async fn req_json<T: serde::de::DeserializeOwned>(
+        &self,
+        req: reqwest::RequestBuilder,
+    ) -> Result<T> {
+        let req = req.header("Accept", "application/json");
+        let resp = do_req(req, &self.token).await?;
+
+        let body = resp.bytes().await.map_err(Error::Read)?;
+        serde_json::from_slice(&body).map_err(Error::Parse)
+    }
     pub fn req(&self, method: Method, path: impl Display) -> Result<reqwest::RequestBuilder> {
         let uri = format!("{}/{path}", self.base_url);

@@ -143,22 +168,33 @@ pub struct ClusterConfig {
     pub addons: String,
 }

-#[derive(serde::Deserialize, serde::Serialize)]
+#[derive(Default, serde::Deserialize, serde::Serialize)]
 #[serde(rename_all = "PascalCase")]
 pub struct HostConfig {
     pub name: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub cluster_name: Option<String>,

-    pub annotations: Map<String, String>,
-    pub bootstrap_config: String,
-    #[serde(rename = "IPXE")]
-    pub ipxe: Option<String>,
     #[serde(rename = "IPs")]
     pub ips: Vec<IpAddr>,

+    #[serde(skip_serializing_if = "Map::is_empty")]
+    pub labels: Map<String, String>,
+    #[serde(skip_serializing_if = "Map::is_empty")]
+    pub annotations: Map<String, String>,
+
+    #[serde(rename = "IPXE", skip_serializing_if = "Option::is_none")]
+    pub ipxe: Option<String>,
+
     pub initrd: String,
     pub kernel: String,
-    pub labels: Map<String, String>,
     pub versions: Map<String, String>,
+
+    pub bootstrap_config: String,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    pub initrd_files: Vec<crate::File>,
+
+    pub config: String,
 }

 #[derive(serde::Deserialize, serde::Serialize)]
@@ -184,6 +220,23 @@ pub struct KubeSignReq {
     pub validity: Option<String>,
 }

+#[derive(serde::Deserialize, serde::Serialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct DownloadSetReq {
+    pub expiry: String,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub items: Vec<DownloadSetItem>,
+}
+
+#[derive(Clone, serde::Deserialize, serde::Serialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct DownloadSetItem {
+    pub kind: String,
+    pub name: String,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub assets: Vec<String>,
+}
+
 #[derive(Debug, serde::Deserialize, serde::Serialize)]
 struct ServerError {
     #[serde(default)]
@@ -253,3 +306,50 @@ pub enum Error {
     #[error("response parsing failed: {0}")]
     Parse(serde_json::Error),
 }
+
+#[derive(serde::Serialize, serde::Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum File {
+    Static(crate::File),
+    Gen { path: String, from: ContentGen },
+}
+
+#[derive(serde::Serialize, serde::Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum ContentGen {
+    CaCrt(CaRef),
+    TlsKey(TlsRef),
+    TlsCrt {
+        key: TlsRef,
+        ca: CaRef,
+        profile: CertProfile,
+    },
+}
+
+#[derive(serde::Serialize, serde::Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum CaRef {
+    Global(String),
+    Cluster(String, String),
+}
+
+#[derive(serde::Serialize, serde::Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum TlsRef {
+    Cluster(String, String),
+    Host(String, String),
+}
+
+#[derive(serde::Serialize, serde::Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum CertProfile {
+    Client,
+    Server,
+    /// basicaly Client+Server
+    Peer,
+    Kube {
+        user: String,
+        group: String,
+        duration: Duration,
+    },
+}
```
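Client-side, the two new endpoints pair up as sign-then-fetch. A hypothetical usage sketch; the `Client` constructor is not part of this diff, so it is left as a `todo!()` placeholder rather than guessing its signature:

```rust
// Driver sketch for the new download-set API; dkl::dls types are from the
// diff, the Client construction (base URL + token) is intentionally elided.
use dkl::dls;

#[tokio::main(flavor = "current_thread")]
async fn main() -> eyre::Result<()> {
    let client: dls::Client = todo!("construct as in src/bin/dls.rs (base URL + token)");

    let signed = client
        .sign_dl_set(&dls::DownloadSetReq {
            expiry: "1d".into(),
            items: vec![dls::DownloadSetItem {
                kind: "host".into(),
                name: "m1".into(),
                assets: vec!["kernel".into(), "initrd".into()],
            }],
        })
        .await?;

    // the signed set is then redeemed through the public download-set endpoint
    let _stream = client.fetch_dl_set(&signed, "host", "m1", "kernel").await?;
    Ok(())
}
```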
src/dynlay.rs (new file, 188 lines)

```rust
use eyre::{format_err, Result};
use log::{debug, error, info, warn};
use std::path::PathBuf;
use tokio::{fs, io::AsyncWriteExt, process::Command};

use crate::fs::spawn_walk_dir;

pub struct Dynlay<'t> {
    pub url_prefix: &'t str,
    pub layers_dir: &'t str,
    pub chroot: PathBuf,
}

impl<'t> Dynlay<'t> {
    pub async fn install(&self, layer: &str, version: &str) -> Result<()> {
        let lay_dir = &format!("{base}/{layer}", base = self.layers_dir);

        debug!("mkdir -p {lay_dir}");
        fs::create_dir_all(lay_dir).await?;

        let lay_path = &format!("{lay_dir}/{version}");

        if !fs::try_exists(lay_path).await? {
            let part_file = &format!("{lay_dir}/{version}.tmp");

            self.fetch(layer, version, part_file).await?;

            (fs::rename(part_file, lay_path).await)
                .map_err(|e| format_err!("failed mv {part_file} {lay_path}: {e}"))?;
        }

        let mount_path = PathBuf::from(lay_dir).join("mounts").join(layer);

        (fs::create_dir_all(&mount_path).await)
            .map_err(|e| format_err!("mkdir -p {mount_path:?} failed: {e}"))?;

        let mount_path = &fs::canonicalize(mount_path).await?;
        let mount_path_str = &mount_path.to_string_lossy().into_owned();

        if is_mount_point(mount_path_str).await? {
            info!("clearing previous mount");

            let mut paths = spawn_walk_dir(mount_path.clone());
            while let Some(result) = paths.recv().await {
                let Ok((path, md)) = result else {
                    continue;
                };
                if !md.is_dir() {
                    let path = self.chroot.join(&path);

                    debug!("rm {path:?}");
                    if let Err(e) = fs::remove_file(&path).await {
                        warn!("rm {path:?} failed: {e}");
                    }
                }
            }

            sudo("umount", &[mount_path]).await?;
        }

        // mount layer
        info!("mounting layer");
        sudo("mount", &["-t", "squashfs", lay_path, &mount_path_str]).await?;

        let mut paths = spawn_walk_dir(mount_path.clone());
        while let Some(result) = paths.recv().await {
            let Ok((path, md)) = result else {
                continue;
            };

            let target = self.chroot.join(&path);

            if md.is_dir() {
                debug!("mkdir -p {target:?}");
                if let Err(e) = fs::create_dir_all(&target).await {
                    error!("mkdir -p {target:?} failed: {e}");
                }
            } else {
                let _ = fs::remove_file(&target).await;

                let source = mount_path.join(&path);

                debug!("ln -s {source:?} {target:?}");
                if let Err(e) = fs::symlink(&source, &target).await {
                    error!("ln -s {source:?} {target:?} failed: {e}");
                }
            }
        }

        Ok(())
    }

    async fn fetch(&self, layer: &str, version: &str, part_file: &str) -> Result<()> {
        let url = &format!("{}/{layer}/{version}", self.url_prefix);
        info!("fetching {url}");

        let mut out = (fs::File::create(part_file).await)
            .map_err(|e| format_err!("failed to open {part_file}: {e}"))?;

        let resp = reqwest::get(url).await?;
        if !resp.status().is_success() {
            return Err(format_err!("fetch failed: {}", resp.status()));
        }

        let sha1 = (resp.headers().get("x-content-sha1"))
            .ok_or(format_err!("no content hash in response"))?;
        let sha1 = (sha1.to_str()).map_err(|e| format_err!("invalid sha1: {e}"))?;

        debug!("content sha1: {sha1}");
        let mut exp_sha1 = [0; 20];
        hex::decode_to_slice(sha1, &mut exp_sha1).map_err(|e| format_err!("invalid sha1: {e}"))?;

        let mut hash = openssl::sha::Sha1::new();

        use futures::StreamExt;
        let mut stream = resp.bytes_stream();
        while let Some(bytes) = stream.next().await {
            let bytes = bytes.map_err(|e| format_err!("remote read error: {e}"))?;
            hash.update(&bytes);
            (out.write_all(&bytes).await).map_err(|e| format_err!("local write error: {e}"))?;
        }

        (out.flush().await).map_err(|e| format_err!("local write error: {e}"))?;
        drop(out);

        let dl_sha1 = hash.finish();
        if dl_sha1 != exp_sha1 {
            if let Err(e) = fs::remove_file(part_file).await {
                error!("failed to remove {part_file}: {e}");
            }
            return Err(format_err!(
                "invalid content hash: expected {exp}, got {got}",
                exp = hex::encode(exp_sha1),
                got = hex::encode(dl_sha1)
            ));
        }

        Ok(())
    }
}

async fn sudo<I, S>(program: &str, args: I) -> Result<()>
where
    I: IntoIterator<Item = S>,
    S: AsRef<std::ffi::OsStr>,
{
    let mut cmd = if nix::unistd::geteuid().is_root() {
        let mut cmd = Command::new(program);
        cmd.args(args);
        cmd
    } else {
        let mut cmd = Command::new("sudo");
        cmd.arg(program).args(args);
        cmd
    };
    let status = cmd.status().await?;
    if status.success() {
        Ok(())
    } else {
        Err(format_err!("{program} failed: {status}"))
    }
}

async fn is_mount_point(target: &str) -> Result<bool> {
    for line in fs::read_to_string("/proc/self/mounts").await?.lines() {
        let line = line.trim_ascii();
        if line.is_empty() || line.starts_with("#") {
            continue;
        }

        let split: Vec<_> = line.split_ascii_whitespace().collect();
        if split.len() < 6 {
            continue;
        }

        // let dev = split[0];
        let mount_point = split[1];
        // let fstype = split[2];
        // let mntops = split[3];
        // let fs_freq = split[4];
        // let fsck_passno = split[5];

        if mount_point == target {
            return Ok(true);
        }
    }
    Ok(false)
}
```
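The integrity check in `Dynlay::fetch` hashes the body while streaming and compares the digest to the hex-encoded `x-content-sha1` header. The same check in isolation, using the standard SHA-1 test vector for "hello":

```rust
// Standalone sketch of the hash verification step from Dynlay::fetch.
fn main() {
    let body = b"hello";
    let header = "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d"; // sha1("hello")

    let mut expected = [0u8; 20];
    hex::decode_to_slice(header, &mut expected).unwrap();

    let mut hash = openssl::sha::Sha1::new();
    hash.update(body);
    assert_eq!(hash.finish(), expected);
    println!("content hash OK");
}
```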
src/fs.rs (new file, 60 lines)

```rust
use eyre::Result;
use std::fs::Metadata;
use std::path::PathBuf;
use tokio::fs::read_dir;
use tokio::sync::mpsc;

pub fn spawn_walk_dir(
    dir: impl Into<PathBuf> + Send + 'static,
) -> mpsc::Receiver<Result<(PathBuf, Metadata)>> {
    let (tx, rx) = mpsc::channel(1);
    tokio::spawn(walk_dir(dir, tx));
    rx
}

pub async fn walk_dir(dir: impl Into<PathBuf>, tx: mpsc::Sender<Result<(PathBuf, Metadata)>>) {
    let dir: PathBuf = dir.into();

    let mut todo = std::collections::LinkedList::new();
    if let Ok(rd) = read_dir(&dir).await {
        todo.push_front(rd);
    }

    while let Some(rd) = todo.front_mut() {
        let entry = match rd.next_entry().await {
            Ok(v) => v,
            Err(e) => {
                if tx.send(Err(e.into())).await.is_err() {
                    return;
                }
                todo.pop_front(); // skip dir on error
                continue;
            }
        };

        let Some(entry) = entry else {
            todo.pop_front();
            continue;
        };

        let Ok(md) = entry.metadata().await else {
            continue;
        };
        let is_dir = md.is_dir();

        let Ok(path) = entry.path().strip_prefix(&dir).map(|p| p.to_path_buf()) else {
            continue; // sub-entry not in dir, weird but semantically, we ignore
        };

        if tx.send(Ok((path, md))).await.is_err() {
            return;
        }

        // recurse in sub directories
        if is_dir {
            if let Ok(rd) = read_dir(entry.path()).await {
                todo.push_front(rd);
            }
        }
    }
}
```
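A minimal consumer of `spawn_walk_dir`: entries stream through a bounded channel, each path relative to the walk root, with a directory reported before its contents (the `LinkedList` acts as a stack, so traversal is depth-first):

```rust
// Usage sketch; "src" is just an example directory.
use dkl::fs::spawn_walk_dir;

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let mut paths = spawn_walk_dir("src");
    while let Some(result) = paths.recv().await {
        match result {
            Ok((path, md)) => println!("{} dir={}", path.display(), md.is_dir()),
            Err(e) => eprintln!("walk error: {e}"),
        }
    }
}
```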
src/lib.rs (12 lines changed)

```diff
@@ -1,9 +1,11 @@
 pub mod apply;
 pub mod bootstrap;
 pub mod dls;
+pub mod dynlay;
+pub mod fs;
 pub mod logger;

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
 pub struct Config {
     pub layers: Vec<String>,
     pub root_user: RootUser,
@@ -17,18 +19,20 @@ pub struct Config {
     pub users: Vec<User>,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
 pub struct RootUser {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub password_hash: Option<String>,
     pub authorized_keys: Vec<String>,
 }

-#[derive(Debug, serde::Deserialize, serde::Serialize)]
+#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
 pub struct Mount {
-    pub r#type: Option<String>,
     pub dev: String,
     pub path: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub r#type: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub options: Option<String>,
 }
```
src/logger.rs

```diff
@@ -35,7 +35,7 @@ impl<'t> Logger<'t> {
         let archives_read_dir = (fs::read_dir(archives_path).await)
             .map_err(|e| format_err!("failed to list archives: {e}"))?;

-        let mut prev_stamp = ts_trunc(Utc::now());
+        let mut prev_stamp = trunc_ts(Utc::now());
         let mut current_log = BufWriter::new(self.open_log(prev_stamp).await?);

         tokio::spawn(compress_archives(
@@ -94,7 +94,7 @@ impl<'t> Logger<'t> {
         prev_stamp: &mut Timestamp,
         out: &mut BufWriter<File>,
     ) -> Result<()> {
-        let trunc_ts = ts_trunc(log.ts);
+        let trunc_ts = trunc_ts(log.ts);
         if *prev_stamp < trunc_ts {
             // switch log
             out.flush().await?;
@@ -193,7 +193,7 @@ async fn copy(
     }
 }

-fn ts_trunc(ts: Timestamp) -> Timestamp {
+pub fn trunc_ts(ts: Timestamp) -> Timestamp {
     ts.duration_trunc(TRUNC_DELTA)
         .expect("duration_trunc failed")
 }
@@ -243,7 +243,7 @@ async fn compress(path: impl AsRef<Path>) {
         .await
         .map_err(|e| format_err!("open {path_str} failed: {e}"))?;

-    let out_path = path.with_extension("zstd");
+    let out_path = path.with_extension("zst");
     let out = (File::create(&out_path).await) // create output
         .map_err(|e| format_err!("create {} failed: {e}", out_path.to_string_lossy()))?;

@@ -285,14 +285,16 @@ pub async fn log_files(log_path: &str, log_name: &str) -> std::io::Result<Vec<Lo
             continue;
         };

-        let (name, compressed) = file_name
-            .strip_suffix(".zstd")
-            .map_or((file_name, false), |s| (s, true));
-
-        let Some(name) = name.strip_suffix(".log") else {
+        let Some((name, ext)) = file_name.rsplit_once('.') else {
             continue;
         };

+        let compressed = match ext {
+            "zst" => true,
+            "log" => false,
+            _ => continue,
+        };
+
         let Some((name, timestamp)) = name.rsplit_once('.') else {
             continue;
         };
@@ -331,12 +333,12 @@ impl PartialOrd for LogFile {

 impl LogFile {
     pub async fn copy_to(&self, out: &mut (impl AsyncWrite + Unpin)) -> io::Result<u64> {
-        let mut input = File::open(&self.path).await?;
+        let input = &mut File::open(&self.path).await?;
         if self.compressed {
-            let mut out = ZstdDecoder::new(out);
-            tokio::io::copy(&mut input, &mut out).await
+            let out = &mut ZstdDecoder::new(out);
+            tokio::io::copy(input, out).await
         } else {
-            tokio::io::copy(&mut input, out).await
+            tokio::io::copy(input, out).await
         }
     }
 }
```
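The `log_files` change also switches the archive naming convention: compressed archives are now `<name>.<stamp>.zst` (since `with_extension` replaces the `.log` suffix), where the old code expected the double extension `.log.zstd`. A standalone sketch of the new name parsing:

```rust
// Mirrors the rsplit_once-based parsing in log_files above.
fn parse(file_name: &str) -> Option<(&str, &str, bool)> {
    let (name, ext) = file_name.rsplit_once('.')?;
    let compressed = match ext {
        "zst" => true,
        "log" => false,
        _ => return None, // not a log file
    };
    let (name, timestamp) = name.rsplit_once('.')?;
    Some((name, timestamp, compressed))
}

fn main() {
    assert_eq!(parse("bash.20250720_12.log"), Some(("bash", "20250720_12", false)));
    assert_eq!(parse("bash.20250720_12.zst"), Some(("bash", "20250720_12", true)));
    assert_eq!(parse("notes.txt"), None);
    println!("ok");
}
```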
test-dkl (18 lines changed)

```diff
@@ -3,7 +3,7 @@ set -ex

 dkl=target/debug/dkl

-test=${1:-log}
+test=${1:-dynlay}

 export RUST_LOG=debug

@@ -19,8 +19,20 @@ case $test in
     cat tmp/log/bash.log
     ;;

-log)
-    $dkl log --log-path tmp/log bash 20250720_12 20250720_16
+log-ls)
+    $dkl log --log-path tmp/log bash ls -l
+    ;;
+log-cat)
+    $dkl log --log-path tmp/log bash cat 20250720_12 20301231_23
+    ;;
+log-clean)
+    $dkl log --log-path tmp/log bash ls -l
+    $dkl log --log-path tmp/log bash cleanup 0
+    ;;
+
+dynlay)
+    mkdir -p tmp/system
+    $dkl dynlay --layers-dir tmp/dynlay --chroot tmp/system kubernetes v1.33.3_containerd.2.0.5
     ;;

 *) echo 1>&2 "unknown test: $test"; exit 1 ;;
```
test-dls (9 lines changed)

```diff
@@ -18,3 +18,12 @@ $dls cluster cluster ssh-sign ~/.ssh/id_ed25519.pub
 $dls host m1 | jq '{Name, ClusterName, IPs}'
 $dls host m1 bootstrap-config
+
+export DLS_DLSET=$($dls dl-set sign --expiry 1d \
+    cluster:cluster:addons \
+    host:m1:kernel:initrd:bootstrap.tar \
+    host:m2:config:bootstrap-config:boot.vmdk)
+
+$dls dl-set show
+$dls dl-set fetch host m2 bootstrap-config
+rm host_m2_bootstrap-config
```