25 Commits

Author SHA1 Message Date
Mikaël Cluseau
e7769155e1 merged layer handling 2026-04-21 07:36:59 +02:00
Mikaël Cluseau
c8bbbf858a prepare for erofs 2026-04-20 09:37:17 +02:00
Mikaël Cluseau
9a65ca5552 test w/o crypt too 2026-04-20 08:55:28 +02:00
Mikaël Cluseau
8596389970 umount modules before switch_root 2026-04-18 20:07:32 +02:00
Mikaël Cluseau
ba0a304095 bump docker layers 2026-04-18 18:58:52 +02:00
Mikaël Cluseau
798317432d chore: Release init version 2.6.0 2026-04-18 18:54:23 +02:00
Mikaël Cluseau
5c86af7614 feat: erofs layers 2026-04-18 18:53:29 +02:00
Mikaël Cluseau
0c4f636477 chore: Release init version 2.5.3 2026-02-10 21:26:05 +01:00
Mikaël Cluseau
7b30eb4435 base64: decode like dkl 2026-02-10 21:26:05 +01:00
Mikaël Cluseau
2e337f9957 bump deps 2026-02-10 17:51:16 +01:00
Mikaël Cluseau
dff9142bdc lvm: PV also match udev filter 2026-02-10 17:51:12 +01:00
Mikaël Cluseau
74c8ae293d bump versions 2026-01-31 21:09:03 +01:00
Mikaël Cluseau
f886692c7f docker: bump alpine 2026-01-26 11:40:07 +01:00
Mikaël Cluseau
96f801e27d update deps 2026-01-25 22:00:36 +01:00
Mikaël Cluseau
3f7cd80a96 chore: Release init version 2.5.2 2026-01-25 22:00:36 +01:00
Mikaël Cluseau
41c3f9badd bump deps, rust, alpine, and add the real iproute2 2025-12-17 17:46:43 +01:00
Mikaël Cluseau
01a0073e78 cryptsetup: allow 'mass' reuse 2025-11-22 16:40:53 +01:00
Mikaël Cluseau
ac9d7e8d9d allow device matching by udev properties 2025-11-10 19:22:06 +01:00
Mikaël Cluseau
148aa0cc44 cargo update 2025-09-04 05:41:40 +02:00
Mikaël Cluseau
eb81cd3b5c bootstrap: add VPNs from bootstrap volume 2025-09-03 16:37:39 +02:00
Mikaël Cluseau
f892178d5d wget -> reqwest, now we can have openssl :) 2025-07-21 17:48:26 +02:00
Mikaël Cluseau
cb62ac0ed8 remove system archive feature
Just compress the initrd with zstd.
Remove rsmount dependency, mtab is easy enough to parse.
2025-07-21 17:12:44 +02:00
Mikaël Cluseau
0d9d087afd use shared libs, enabling openssl in init 2025-07-21 03:25:48 +02:00
Mikaël Cluseau
e484802284 bootstrap: chore: extract fn mount_modules 2025-07-18 08:19:17 +02:00
Mikaël Cluseau
423a9c53e6 move configs to dkl crate 2025-07-17 16:48:38 +02:00
26 changed files with 3217 additions and 860 deletions

2687
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "init" name = "init"
version = "2.4.1" version = "2.6.0"
edition = "2024" edition = "2024"
[profile.release] [profile.release]
@@ -13,10 +13,10 @@ codegen-units = 1
[dependencies] [dependencies]
libc = { version = "0.2", default-features = false } libc = { version = "0.2", default-features = false }
env_logger = "0.11.3" env_logger = "0.11.3"
eyre = "0.6.12" eyre = { version = "0.6.12" }
itertools = "0.14.0" itertools = "0.14.0"
log = "0.4.21" log = "0.4.21"
nix = { version = "0.30.1", features = ["feature", "mount", "process", "reboot", "signal"] } nix = { version = "0.31.1", features = ["feature", "mount", "process", "reboot", "signal"] }
regex = "1.11.1" regex = "1.11.1"
serde = { version = "1.0.198", features = ["derive"] } serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116" serde_json = "1.0.116"
@@ -24,9 +24,10 @@ serde_yaml = "0.9.34"
shell-escape = "0.1.5" shell-escape = "0.1.5"
tokio = { version = "1.38.0", features = ["rt", "net", "fs", "process", "io-std", "io-util", "sync", "macros", "signal"] } tokio = { version = "1.38.0", features = ["rt", "net", "fs", "process", "io-std", "io-util", "sync", "macros", "signal"] }
termios = "0.3.3" termios = "0.3.3"
zstd = "0.13.3"
unix_mode = "0.1.4" unix_mode = "0.1.4"
cpio = "0.4.1"
lz4 = "1.28.1"
base64 = "0.22.1"
sys-info = "0.9.1" sys-info = "0.9.1"
dkl = { git = "https://novit.tech/direktil/dkl", version = "1.0.0" }
openssl = "0.10.73"
reqwest = { version = "0.13.1", features = ["native-tls"] }
glob = "0.3.3"
hex = "0.4.3"

View File

@@ -1,44 +1,55 @@
from rust:1.88.0-alpine as rust from rust:1.95.0-alpine as rust
run apk add --no-cache git musl-dev libudev-zero-dev # pkgconfig cryptsetup-dev lvm2-dev clang-dev clang-static run apk add --no-cache git musl-dev libudev-zero-dev openssl-dev cryptsetup-dev lvm2-dev clang-libs clang-dev
workdir /src workdir /src
copy . . copy . .
run --mount=type=cache,id=novit-rs,target=/usr/local/cargo/registry \ run --mount=type=cache,id=novit-rs,target=/usr/local/cargo/registry \
--mount=type=cache,id=novit-rs-target,sharing=private,target=/src/target \ --mount=type=cache,id=novit-rs-target,sharing=private,target=/src/target \
cargo build --release && cp target/release/init / RUSTFLAGS="-C target-feature=-crt-static" cargo install --path . --root /dist
# ------------------------------------------------------------------------ # ------------------------------------------------------------------------
from alpine:3.22.0 as initrd from alpine:3.23.4 as system
run apk add zstd lz4
workdir /system workdir /system
run . /etc/os-release \ run . /etc/os-release \
&& wget -O- https://dl-cdn.alpinelinux.org/alpine/v${VERSION_ID%.*}/releases/x86_64/alpine-minirootfs-${VERSION_ID}-x86_64.tar.gz |tar zxv && wget -O- https://dl-cdn.alpinelinux.org/alpine/v${VERSION_ID%.*}/releases/x86_64/alpine-minirootfs-${VERSION_ID}-x86_64.tar.gz |tar zxv
run apk add --no-cache --update -p . musl coreutils \ run apk add --no-cache --update -p . musl libgcc coreutils \
lvm2 lvm2-extra lvm2-dmeventd udev cryptsetup \ iproute2 lvm2 lvm2-extra lvm2-dmeventd udev cryptsetup \
e2fsprogs lsblk openssl openssh-server wireguard-tools-wg-quick \ e2fsprogs lsblk openssl openssh-server wireguard-tools-wg-quick \
&& rm -rf usr/share/apk var/cache/apk etc/motd && rm -rf usr/share/apk var/cache/apk etc/motd dev/*
copy etc/sshd_config etc/ssh/sshd_config copy etc/sshd_config etc/ssh/sshd_config
run mkdir /layer \ copy --from=rust /dist/bin/init /system/init
&& mv dev /layer \
# && find |cpio -H newc -o |lz4 >/layer/system.alz4
&& find |cpio -H newc -o |zstd -19 >/layer/system.azstd
workdir /layer
copy --from=rust /init init
run mkdir -p bin run var/log; cd bin && for cmd in init-version init-connect bootstrap; do ln -s ../init $cmd; done run mkdir -p bin run var/log; cd bin && for cmd in init-version init-connect bootstrap; do ln -s ../init $cmd; done
# check viability # check viability
run chroot . init-version run chroot . init-version
run find * |cpio -H newc -oF /initrd # ------------------------------------------------------------------------
from alpine:3.23.4 as initrd
copy --from=system /system /system
run cd /system && find * |cpio -H newc -oF /initrd
# ------------------------------------------------------------------------ # ------------------------------------------------------------------------
from alpine:3.22.0 from debian:stable-backports as initramfs
copy --from=initrd /initrd / run apt update && apt install -y erofs-utils
copy --from=system /system /system
run mkfs.erofs \
-z lzma -C131072 -Efragments,ztailpacking \
-T0 --all-time --ignore-mtime \
/initramfs /system
# ------------------------------------------------------------------------
from alpine:3.23.4
copy --from=initrd /initrd /initrd
entrypoint ["base64","/initrd"] entrypoint ["base64","/initrd"]
#copy --from=initramfs /initramfs /
#entrypoint ["base64","/initramfs"]

View File

@@ -14,8 +14,7 @@ cpio --quiet --extract --file $base_initrd --directory $dir
(cd $dir && find * |cpio --create -H newc -R 0:0) >test-initrd.cpio (cd $dir && find * |cpio --create -H newc -R 0:0) >test-initrd.cpio
cpio --quiet -tF test-initrd.cpio
if cpio -tF test-initrd.cpio 2>&1 |grep bytes.of.junk; then echo "bad cpio archive"; exit 1; fi if cpio -tF test-initrd.cpio 2>&1 |grep bytes.of.junk; then echo "bad cpio archive"; exit 1; fi
lz4 -l9v test-initrd.cpio && mv test-initrd.cpio.lz4 test-initrd.cpio zstd -12 -T0 -vf test-initrd.cpio && mv test-initrd.cpio.zst test-initrd.cpio

View File

@@ -4,6 +4,7 @@ modd.conf {}
prep: cargo test prep: cargo test
prep: cargo build prep: cargo build
prep: debug/init-version prep: debug/init-version
#prep: cargo run --bin test
} }
target/debug/init Dockerfile { target/debug/init Dockerfile {

View File

@@ -1 +0,0 @@
pub mod config;

View File

@@ -1,192 +0,0 @@
use std::collections::BTreeMap as Map;
pub const TAKE_ALL: i16 = -1;
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Config {
pub anti_phishing_code: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub keymap: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub modules: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub resolv_conf: Option<String>,
#[serde(default)]
pub vpns: Map<String, String>,
pub networks: Vec<Network>,
pub auths: Vec<Auth>,
#[serde(default)]
pub ssh: SSHServer,
#[serde(default)]
pub pre_lvm_crypt: Vec<CryptDev>,
#[serde(default)]
pub lvm: Vec<LvmVG>,
#[serde(default)]
pub crypt: Vec<CryptDev>,
#[serde(skip_serializing_if = "Option::is_none")]
pub signer_public_key: Option<String>,
pub bootstrap: Bootstrap,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Auth {
pub name: String,
#[serde(alias = "sshKey")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ssh_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub password: Option<String>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Network {
pub name: String,
pub interfaces: Vec<NetworkInterface>,
pub script: String,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct NetworkInterface {
pub var: String,
pub n: i16,
pub regexps: Vec<String>,
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct SSHServer {
pub listen: String,
pub user_ca: Option<String>,
}
impl Default for SSHServer {
fn default() -> Self {
Self {
listen: "[::]:22".to_string(),
user_ca: None,
}
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct LvmVG {
#[serde(alias = "vg")]
pub name: String,
pub pvs: LvmPV,
#[serde(default)]
pub defaults: LvmLVDefaults,
pub lvs: Vec<LvmLV>,
}
#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct LvmLVDefaults {
#[serde(default)]
pub fs: Filesystem,
#[serde(default)]
pub raid: Raid,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub enum Filesystem {
Ext4,
Xfs,
Btrfs,
Other(String),
}
impl Filesystem {
pub fn fstype(&self) -> &str {
use Filesystem as F;
match self {
F::Ext4 => "ext4",
F::Xfs => "xfs",
F::Btrfs => "btrfs",
F::Other(t) => t,
}
}
}
impl Default for Filesystem {
fn default() -> Self {
Filesystem::Ext4
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct LvmLV {
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub fs: Option<Filesystem>,
#[serde(skip_serializing_if = "Option::is_none")]
pub raid: Option<Raid>,
#[serde(flatten)]
pub size: LvSize,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub enum LvSize {
Size(String),
Extents(String),
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct LvmPV {
pub n: i16,
pub regexps: Vec<String>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct CryptDev {
pub name: String,
#[serde(flatten)]
pub filter: DevFilter,
pub optional: Option<bool>,
}
impl CryptDev {
pub fn optional(&self) -> bool {
self.optional.unwrap_or_else(|| self.filter.is_prefix())
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub enum DevFilter {
Dev(String),
Prefix(String),
}
impl DevFilter {
pub fn is_dev(&self) -> bool {
match self {
Self::Dev(_) => true,
_ => false,
}
}
pub fn is_prefix(&self) -> bool {
match self {
Self::Prefix(_) => true,
_ => false,
}
}
}
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
pub struct Raid {
pub mirrors: Option<u8>,
pub stripes: Option<u8>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Bootstrap {
pub dev: String,
pub seed: Option<String>,
}

View File

@@ -1,5 +1,4 @@
pub mod bootstrap; pub mod bootstrap;
pub mod connect_boot;
pub mod init; pub mod init;
pub mod init_input; pub mod init_input;
pub mod version; pub mod version;

View File

@@ -1,11 +1,13 @@
use eyre::{format_err, Result}; use eyre::{format_err, Result};
use log::{error, info, warn}; use log::{error, info, warn};
use std::collections::BTreeSet as Set; use std::collections::BTreeSet as Set;
use std::convert::Infallible;
use std::os::unix::fs::symlink; use std::os::unix::fs::symlink;
use tokio::sync::Mutex; use tokio::sync::Mutex;
use tokio::{fs, process::Command}; use tokio::{fs, process::Command};
use crate::{bootstrap::config::Config, cmd::version::version_string, dklog, input, utils}; use crate::{cmd::version::version_string, dklog, input, utils};
use dkl::bootstrap::Config;
mod bootstrap; mod bootstrap;
mod dmcrypt; mod dmcrypt;
@@ -52,33 +54,15 @@ pub async fn run() {
info!("Linux version {kernel_version}"); info!("Linux version {kernel_version}");
// mount basic filesystems // mount basic filesystems
mount(None, "/proc", "proc", None).await; mount(None::<&str>, "/proc", "proc", None).await;
mount(None, "/sys", "sysfs", None).await; mount(None::<&str>, "/sys", "sysfs", None).await;
mount(None, "/dev", "devtmpfs", None).await; mount(None::<&str>, "/dev", "devtmpfs", None).await;
mount(None, "/dev/pts", "devpts", Some("gid=5,mode=620")).await; mount(None::<&str>, "/dev/pts", "devpts", Some("gid=5,mode=620")).await;
if utils::bool_param("debug") { if utils::bool_param("debug") {
log::set_max_level(log::LevelFilter::Debug); log::set_max_level(log::LevelFilter::Debug);
} }
// extract system archive
retry_or_ignore(async || {
if fs::try_exists("system.azstd").await? {
info!("unpacking system.azstd");
let zarch = fs::read("system.azstd").await?;
let arch = zstd::Decoder::new(zarch.as_slice())?;
extract_cpio(arch).await
} else if fs::try_exists("system.alz4").await? {
info!("unpacking system.alz4");
let zarch = fs::read("system.alz4").await?;
let arch = lz4::Decoder::new(zarch.as_slice())?;
extract_cpio(arch).await
} else {
return Ok(());
}
})
.await;
// load config // load config
let cfg: Config = retry(async || { let cfg: Config = retry(async || {
let cfg = (fs::read("config.yaml").await) let cfg = (fs::read("config.yaml").await)
@@ -95,24 +79,8 @@ pub async fn run() {
// tokio::spawn(child_reaper()); // tokio::spawn(child_reaper());
// mount modules // mount modules
if let Some(ref modules) = cfg.modules { if let Some(modules) = cfg.modules.as_deref() {
retry_or_ignore(async || { retry_or_ignore(async || mount_modules(modules, &kernel_version).await).await;
info!("mounting modules");
mount(Some(modules), "/modules", "squashfs", None).await;
fs::create_dir_all("/lib/modules").await?;
let modules_path = &format!("/modules/lib/modules/{kernel_version}");
if !std::fs::exists(modules_path)? {
return Err(format_err!(
"invalid modules package: {modules_path} should exist"
));
}
symlink(modules_path, format!("/lib/modules/{kernel_version}"))?;
Ok(())
})
.await;
} else { } else {
warn!("modules NOT mounted (not configured)"); warn!("modules NOT mounted (not configured)");
} }
@@ -139,6 +107,7 @@ pub async fn run() {
// Wireguard VPNs // Wireguard VPNs
for (name, conf) in &cfg.vpns { for (name, conf) in &cfg.vpns {
retry_or_ignore(async || { retry_or_ignore(async || {
info!("starting VPN {name}");
let dir = "/etc/wireguard"; let dir = "/etc/wireguard";
fs::create_dir_all(dir).await?; fs::create_dir_all(dir).await?;
@@ -175,11 +144,31 @@ pub async fn run() {
warn!("failed to copy {INIT_LOG} to system: {e}"); warn!("failed to copy {INIT_LOG} to system: {e}");
} }
if let Err(e) = nix::mount::umount2("/modules", nix::mount::MntFlags::MNT_DETACH) {
warn!("failed to umount /modules: {e}");
}
retry(async || switch_root("/system").await).await; retry(async || switch_root("/system").await).await;
} }
use std::path::Path; use std::path::Path;
async fn mount_modules(modules: &str, kernel_version: &str) -> Result<()> {
info!("mounting modules");
mount(Some(modules), "/modules", "squashfs", None).await;
fs::create_dir_all("/lib/modules").await?;
let modules_path = &format!("/modules/lib/modules/{kernel_version}");
if !std::fs::exists(modules_path)? {
return Err(format_err!(
"invalid modules package: {modules_path} should exist"
));
}
symlink(modules_path, format!("/lib/modules/{kernel_version}"))?;
Ok(())
}
async fn chmod(path: impl AsRef<Path>, mode: u32) -> std::io::Result<()> { async fn chmod(path: impl AsRef<Path>, mode: u32) -> std::io::Result<()> {
use std::fs::Permissions; use std::fs::Permissions;
use std::os::unix::fs::PermissionsExt; use std::os::unix::fs::PermissionsExt;
@@ -188,85 +177,30 @@ async fn chmod(path: impl AsRef<Path>, mode: u32) -> std::io::Result<()> {
fs::set_permissions(path, perms).await fs::set_permissions(path, perms).await
} }
async fn extract_cpio(mut arch: impl std::io::Read) -> Result<()> { async fn mount<S: AsRef<Path>>(
loop { src: Option<S>,
let rd = cpio::NewcReader::new(&mut arch)?; dst: impl AsRef<Path>,
let entry = rd.entry(); fstype: &str,
if entry.is_trailer() { opts: Option<&str>,
return Ok(()); ) {
} let src = src.as_ref().map(|s| s.as_ref());
let src_str = src.map(|s| s.display().to_string());
let src_str = src_str.as_deref();
let path = entry.name().to_string(); let dst = dst.as_ref();
let dst_str = &dst.display().to_string();
if let Err(e) = extract_cpio_entry(rd, &path).await {
return Err(format_err!("failed to extract {path}: {e}"));
}
}
}
async fn extract_cpio_entry<R: std::io::Read>(
rd: cpio::NewcReader<R>,
path: impl AsRef<Path>,
) -> Result<()> {
use std::os::unix::fs::chown;
use unix_mode::Type;
let entry = rd.entry();
let path = path.as_ref();
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).await?;
}
let mode = entry.mode();
let uid = entry.uid();
let gid = entry.gid();
match Type::from(mode) {
Type::Dir => {
fs::create_dir_all(path).await?;
}
Type::File => {
let mut data = vec![];
rd.to_writer(&mut data)?;
fs::write(path, data).await?;
}
Type::Symlink => {
let mut data = vec![];
rd.to_writer(&mut data)?;
let target = &Path::new(std::str::from_utf8(&data)?);
tokio::fs::symlink(target, path).await?;
return Ok(());
}
_ => {
warn!("{path:?}: unknown file type: {:?}", Type::from(mode));
return Ok(());
}
}
chmod(path, mode).await?;
chown(path, Some(uid), Some(gid))?;
Ok(())
}
async fn mount(src: Option<&str>, dst: &str, fstype: &str, opts: Option<&str>) {
if let Err(e) = fs::create_dir_all(dst).await { if let Err(e) = fs::create_dir_all(dst).await {
error!("failed to create dir {dst}: {e}"); error!("failed to create dir {dst_str}: {e}");
} }
retry_or_ignore(async || {
let mut is_file = false; let mut is_file = false;
if let Some(src) = src { if let Some(src) = src_str {
retry_or_ignore(async || {
is_file = (fs::metadata(src).await) is_file = (fs::metadata(src).await)
.map_err(|e| format_err!("stat {src} failed: {e}"))? .map_err(|e| format_err!("stat {src} failed: {e}"))?
.is_file(); .is_file();
Ok(())
})
.await;
match fstype { match fstype {
"ext4" => { "ext4" => {
exec("fsck.ext4", &["-p", src]).await; exec("fsck.ext4", &["-p", src]).await;
@@ -275,12 +209,11 @@ async fn mount(src: Option<&str>, dst: &str, fstype: &str, opts: Option<&str>) {
} }
} }
let mut args = vec![src.unwrap_or("none"), dst, "-t", fstype]; let mut args = vec![src_str.unwrap_or("none"), dst_str, "-t", fstype];
if let Some(opts) = opts { if let Some(opts) = opts {
args.extend(["-o", opts]); args.extend(["-o", opts]);
} }
retry_or_ignore(async || {
// if it's a file, we need to use a loopdev // if it's a file, we need to use a loopdev
if is_file { if is_file {
// loopdev crate has annoying dependencies, just use the normal mount program // loopdev crate has annoying dependencies, just use the normal mount program
@@ -288,11 +221,17 @@ async fn mount(src: Option<&str>, dst: &str, fstype: &str, opts: Option<&str>) {
} }
let (cmd_str, _) = cmd_str("mount", &args); let (cmd_str, _) = cmd_str("mount", &args);
let flags = nix::mount::MsFlags::empty();
info!("# {cmd_str}",); info!("# {cmd_str}",);
nix::mount::mount(src, dst, Some(fstype), flags, opts)
.map_err(|e| format_err!("mount {dst} failed: {e}")) let mount = |flags| nix::mount::mount(src, dst, Some(fstype), flags, opts);
use nix::{errno::Errno, mount::MsFlags};
match mount(MsFlags::empty()) {
Err(Errno::EACCES) => mount(MsFlags::MS_RDONLY),
r => r,
}
.map_err(|e| format_err!("mount {dst_str} failed: {e}"))
}) })
.await .await
} }
@@ -307,6 +246,25 @@ async fn start_daemon(prog: &str, args: &[&str]) {
.await; .await;
} }
async fn try_exec_cmd(mut cmd: tokio::process::Command) -> Result<()> {
info!(
"# {} {}",
cmd.as_std().get_program().to_string_lossy(),
cmd.as_std()
.get_args()
.map(|a| a.to_string_lossy())
.collect::<Vec<_>>()
.join(" ")
);
let s = cmd.status().await?;
if s.success() {
Ok(())
} else {
Err(format_err!("command failed: {s}"))
}
}
async fn try_exec(prog: &str, args: &[&str]) -> Result<()> { async fn try_exec(prog: &str, args: &[&str]) -> Result<()> {
let (cmd_str, mut cmd) = cmd_str(prog, args); let (cmd_str, mut cmd) = cmd_str(prog, args);
info!("# {cmd_str}"); info!("# {cmd_str}");
@@ -406,13 +364,7 @@ async fn child_reaper() {
} }
} }
macro_rules! cstr { async fn switch_root(root: &str) -> Result<Infallible> {
($s:expr) => {
std::ffi::CString::new($s)?.as_c_str()
};
}
async fn switch_root(root: &str) -> Result<()> {
info!("killing all processes and switching root"); info!("killing all processes and switching root");
dklog::LOG.close().await; dklog::LOG.close().await;
@@ -423,7 +375,13 @@ async fn switch_root(root: &str) -> Result<()> {
eprintln!("failed to kill processes: {e}"); eprintln!("failed to kill processes: {e}");
} }
nix::unistd::execv( macro_rules! cstr {
($s:expr) => {
std::ffi::CString::new($s)?.as_c_str()
};
}
Ok(nix::unistd::execv(
cstr!("/sbin/switch_root"), cstr!("/sbin/switch_root"),
&[ &[
cstr!("switch_root"), cstr!("switch_root"),
@@ -432,8 +390,5 @@ async fn switch_root(root: &str) -> Result<()> {
cstr!(root), cstr!(root),
cstr!("/sbin/init"), cstr!("/sbin/init"),
], ],
) )?)
.unwrap();
unreachable!();
} }

View File

@@ -1,25 +1,41 @@
use eyre::{format_err, Result}; use eyre::{format_err, Result};
use log::{info, warn}; use log::{info, warn};
use std::path::Path; use std::path::{Path, PathBuf};
use tokio::{ use tokio::{
fs, fs,
io::{AsyncBufReadExt, BufReader}, io::{AsyncBufReadExt, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
}; };
use super::{exec, mount, retry, retry_or_ignore, try_exec}; use dkl::{
use crate::bootstrap::config::Config; self,
use crate::{dkl, utils}; apply::{self, chroot, set_perms},
base64_decode,
bootstrap::Config,
};
use super::{exec, mount, retry, retry_or_ignore, try_exec, try_exec_cmd};
use crate::{fs::walk_dir, utils};
pub async fn bootstrap(cfg: Config) { pub async fn bootstrap(cfg: Config) {
let verifier = retry(async || Verifier::from_config(&cfg)).await; let verifier = retry(async || Verifier::from_config(&cfg)).await;
let bs = cfg.bootstrap; let bs = &cfg.bootstrap;
mount(Some(&bs.dev), "/bootstrap", "ext4", None).await;
// VPNs
for vpn_conf in walk_dir("/bootstrap/vpns").await {
if !vpn_conf.ends_with(".conf") {
continue;
}
retry_or_ignore(async || { retry_or_ignore(async || {
mount(Some(&bs.dev), "/bootstrap", "ext4", None).await; info!("starting VPN from {vpn_conf}");
Ok(()) try_exec("wg-quick", &["up", &vpn_conf]).await
}) })
.await; .await;
}
// prepare system
let boot_version = utils::param("version").unwrap_or("current"); let boot_version = utils::param("version").unwrap_or("current");
let base_dir = &format!("/bootstrap/{boot_version}"); let base_dir = &format!("/bootstrap/{boot_version}");
@@ -38,7 +54,7 @@ pub async fn bootstrap(cfg: Config) {
}) })
.await; .await;
mount_system(&sys_cfg, base_dir, &verifier).await; mount_system(&sys_cfg, &cfg, base_dir, &verifier).await;
retry_or_ignore(async || { retry_or_ignore(async || {
let path = "/etc/resolv.conf"; let path = "/etc/resolv.conf";
@@ -50,13 +66,11 @@ pub async fn bootstrap(cfg: Config) {
}) })
.await; .await;
retry_or_ignore(async || apply_files(&sys_cfg.files, "/system").await).await; retry_or_ignore(async || apply::files(&sys_cfg.files, "/system", false).await).await;
apply_groups(&sys_cfg.groups, "/system").await; apply_groups(&sys_cfg.groups, "/system").await;
apply_users(&sys_cfg.users, "/system").await; apply_users(&sys_cfg.users, "/system").await;
// TODO VPNs
mount_filesystems(&sys_cfg.mounts, "/system").await; mount_filesystems(&sys_cfg.mounts, "/system").await;
retry_or_ignore(async || { retry_or_ignore(async || {
@@ -77,49 +91,39 @@ impl Verifier {
return Ok(Self { pubkey: None }); return Ok(Self { pubkey: None });
}; };
use base64::{prelude::BASE64_STANDARD, Engine}; let pubkey = base64_decode(pubkey)?;
let pubkey = BASE64_STANDARD.decode(pubkey)?;
let pubkey = Some(pubkey); let pubkey = Some(pubkey);
return Ok(Self { pubkey }); return Ok(Self { pubkey });
} }
async fn verify_path(&self, path: &str) -> Result<()> { async fn verify_path(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
let path = path.as_ref();
let p = path.display();
let data = (fs::read(path).await).map_err(|e| format_err!("failed to read {p}: {e}"))?;
let Some(ref pubkey) = self.pubkey else { let Some(ref pubkey) = self.pubkey else {
return Ok(()); return Ok(data);
}; };
info!("verifying {path}"); info!("verifying {p}");
let mut pubkey = std::io::Cursor::new(pubkey); let sig = path.with_added_extension("sig");
let sig = (fs::read(&sig).await)
.map_err(|e| format_err!("failed to read {}: {e}", sig.display()))?;
let sig = format!("{path}.sig"); use openssl::{hash::MessageDigest, pkey::PKey, sign::Verifier};
let pubkey = PKey::public_key_from_der(pubkey)?;
use std::process::Stdio; let sig_ok = Verifier::new(MessageDigest::sha512(), &pubkey)?
use tokio::process::Command; .verify_oneshot(&sig, &data)
.map_err(|e| format_err!("verify failed: {e}"))?;
let mut openssl = Command::new("openssl") if sig_ok {
.stdin(Stdio::piped()) Ok(data)
.args(&[
"dgst",
"-sha512",
"-verify",
"/dev/stdin",
"-signature",
&sig,
path,
])
.spawn()?;
tokio::io::copy(&mut pubkey, openssl.stdin.as_mut().unwrap()).await?;
let status = openssl.wait().await?;
if status.success() {
Ok(())
} else { } else {
Err(format_err!( Err(format_err!("signature verification failed for {p}"))
"signature verification failed for {path}: {status}"
))
} }
} }
} }
@@ -152,19 +156,27 @@ async fn seed_config(
return Err(format_err!("{cfg_path} does not exist after seeding")); return Err(format_err!("{cfg_path} does not exist after seeding"));
} }
verifier.verify_path(&cfg_path).await?; verifier.verify_path(&cfg_path).await
Ok(fs::read(cfg_path).await?)
} }
async fn fetch_bootstrap(seed_url: &str, output_file: &str) -> Result<()> { async fn fetch_bootstrap(seed_url: &str, output_file: &str) -> Result<()> {
let tmp_file = &format!("{output_file}.new"); let seed_url: reqwest::Url = seed_url.parse()?;
let _ = fs::remove_file(tmp_file).await;
try_exec("wget", &["-O", tmp_file, seed_url]).await?;
fs::rename(tmp_file, output_file) info!(
.await "fetching {output_file} from {}",
.map_err(|e| format_err!("seed rename failed: {e}"))?; seed_url.host_str().unwrap_or("<no host>")
);
let resp = reqwest::get(seed_url).await?;
if !resp.status().is_success() {
return Err(format_err!("HTTP request failed: {}", resp.status()));
}
let data = (resp.bytes().await).map_err(|e| format_err!("HTTP download failed: {e}"))?;
(fs::write(output_file, &data).await)
.map_err(|e| format_err!("output file write failed: {e}"))?;
Ok(()) Ok(())
} }
@@ -180,42 +192,136 @@ fn default_root_tmpfs_opts() -> Option<String> {
Some(format!("size={fs_size}m")) Some(format!("size={fs_size}m"))
} }
async fn mount_system(cfg: &dkl::Config, bs_dir: &str, verifier: &Verifier) { struct LayerMounter<'t> {
bs_dir: &'t str,
layers_dir: &'t str,
verifier: &'t Verifier,
lower_dir: String,
}
impl LayerMounter<'_> {
fn src_path(&self, name: &str) -> PathBuf {
let mut p = PathBuf::from(self.bs_dir);
p.push(name);
if name != "merged" {
p.add_extension("fs");
}
p
}
async fn exists(&self, name: &str) -> bool {
retry(async || Ok(fs::try_exists(self.src_path(name)).await?)).await
}
async fn mount(&mut self, name: &str) {
self.mount_path(self.src_path(name), name, true).await
}
async fn mount_path(&mut self, src: impl AsRef<Path>, name: &str, verify: bool) {
let src = src.as_ref();
let tgt_dir = PathBuf::from(self.layers_dir).join(name);
let tgt = tgt_dir.with_added_extension("fs");
if let Err(e) = fs::create_dir_all(&tgt_dir).await {
warn!("mkdir -p {}: {e}", tgt_dir.display());
}
let mount_src = if name == "merged" {
retry(async || {
let data = self.verifier.verify_path(src).await?;
let data = MergedLayer::from_bytes(&data)
.ok_or(format_err!("{}: invalid data", src.display()))?;
data.create(&tgt)
.await
.map_err(|e| format_err!("write {}: {e}", tgt.display()))?;
let dm_name = &format!("system");
let mut cmd = tokio::process::Command::new("veritysetup");
cmd.arg("open")
.arg(format!("--hash-offset={}", data.hash_offset()))
.arg(&tgt)
.arg(dm_name)
.arg(&tgt)
.arg(data.root_hash_hex());
try_exec_cmd(cmd).await?;
Ok(PathBuf::from("/dev/mapper").join(dm_name))
})
.await
} else {
retry(async || {
let src = if verify {
self.verifier.verify_path(src).await?
} else {
fs::read(src).await?
};
fs::write(&tgt, &src).await?;
Ok(tgt.clone())
})
.await
};
retry(async || {
let mut buf = [0u8; 1028];
fs::File::open(&mount_src)
.await
.map_err(|e| format_err!("open {}: {e}", mount_src.display()))?
.read_exact(&mut buf)
.await
.map_err(|e| format_err!("read {}: {e}", mount_src.display()))?;
let fstype = if buf[1024..1028] == 0xE0F5E1E2u32.to_le_bytes() {
"erofs"
} else {
"squashfs"
};
mount(Some(&mount_src), &tgt_dir, fstype, None).await;
Ok(())
})
.await;
if !self.lower_dir.is_empty() {
self.lower_dir.push(':');
}
self.lower_dir.push_str(&tgt_dir.to_string_lossy());
}
}
async fn mount_system(cfg: &dkl::Config, bs_cfg: &Config, bs_dir: &str, verifier: &Verifier) {
let opts = match utils::param("root-opts") { let opts = match utils::param("root-opts") {
Some(s) => Some(s.to_string()), Some(s) => Some(s.to_string()),
None => default_root_tmpfs_opts(), None => default_root_tmpfs_opts(),
}; };
let mem_dir = "/mem"; let mem_dir = "/mem";
mount(None, mem_dir, "tmpfs", opts.as_deref()).await; mount(None::<&str>, mem_dir, "tmpfs", opts.as_deref()).await;
let layers_dir = &format!("{mem_dir}/layers"); let mut mounter = LayerMounter {
let mut lower_dir = String::new(); bs_dir,
layers_dir: &format!("{mem_dir}/layers"),
for layer in &cfg.layers { verifier,
let src = if layer == "modules" { lower_dir: String::new(),
"/modules.sqfs".to_string()
} else {
let p = format!("{bs_dir}/{layer}.fs");
retry(async || verifier.verify_path(&p).await).await;
p
}; };
let tgt = &format!("{mem_dir}/{layer}.fs"); if mounter.exists("merged").await {
retry(async || { mounter.mount("merged").await;
info!("copying layer {layer} from {src}"); } else {
fs::copy(&src, tgt).await?; for layer in &cfg.layers {
Ok(()) if layer == "modules" && bs_cfg.modules.is_some() {
}) continue; // take modules from initrd
.await;
let layer_dir = &format!("{layers_dir}/{layer}");
mount(Some(tgt), layer_dir, "squashfs", None).await;
if !lower_dir.is_empty() {
lower_dir.push(':');
} }
lower_dir.push_str(&layer_dir);
mounter.mount(layer).await;
}
}
if let Some(ref modules) = bs_cfg.modules {
mounter.mount_path(modules, "modules", false).await;
} }
let upper_dir = &format!("{mem_dir}/upper"); let upper_dir = &format!("{mem_dir}/upper");
@@ -228,15 +334,9 @@ async fn mount_system(cfg: &dkl::Config, bs_dir: &str, verifier: &Verifier) {
}) })
.await; .await;
mount( let lower_dir = &mounter.lower_dir;
None, let opts = format!("lowerdir={lower_dir},upperdir={upper_dir},workdir={work_dir}");
"/system", mount(None::<&str>, "/system", "overlay", Some(&opts)).await;
"overlay",
Some(&format!(
"lowerdir={lower_dir},upperdir={upper_dir},workdir={work_dir}"
)),
)
.await;
// make root rshared (default in systemd, required by Kubernetes 1.10+) // make root rshared (default in systemd, required by Kubernetes 1.10+)
// equivalent to "mount --make-rshared /" // equivalent to "mount --make-rshared /"
@@ -250,45 +350,51 @@ async fn mount_system(cfg: &dkl::Config, bs_dir: &str, verifier: &Verifier) {
.await; .await;
} }
fn chroot(root: &str, path: &str) -> String { struct MergedLayer<'t> {
format!("{root}/{}", path.trim_start_matches(|c| c == '/')) #[allow(unused)]
root_hash_sig: &'t [u8],
root_hash: &'t [u8],
data: &'t [u8],
hash: &'t [u8],
} }
async fn apply_files(files: &[dkl::File], root: &str) -> Result<()> { impl<'t> MergedLayer<'t> {
for file in files { fn from_bytes(mut src: &'t [u8]) -> Option<Self> {
let path = chroot(root, &file.path); let mut next = || {
let path = Path::new(&path); let (len, rem) = src.split_at_checked(8)?;
let len = u64::from_be_bytes(len.try_into().ok()?);
let (data, rem) = rem.split_at_checked(len as usize)?;
src = rem;
Some(data)
};
if let Some(parent) = path.parent() { Some(Self {
fs::create_dir_all(parent).await?; root_hash_sig: next()?,
root_hash: next()?,
data: next()?,
hash: next()?,
})
} }
use crate::dkl::FileKind as K; async fn create(&self, path: impl AsRef<Path>) -> std::io::Result<()> {
match &file.kind { let mut out = fs::File::create(path).await?;
K::Content(content) => fs::write(path, content.as_bytes()).await?, self.write_to(&mut out).await?;
K::Dir(true) => fs::create_dir(path).await?, out.shutdown().await
K::Dir(false) => {} // shouldn't happen, but semantic is to ignore
K::Symlink(tgt) => fs::symlink(tgt, path).await?,
}
match file.kind {
K::Symlink(_) => {}
_ => set_perms(path, file.mode).await?,
}
info!("created {}", file.path);
} }
async fn write_to(&self, mut out: impl AsyncWrite + Unpin) -> std::io::Result<()> {
out.write_all(self.data).await?;
out.write_all(self.hash).await?;
Ok(()) Ok(())
} }
async fn set_perms(path: impl AsRef<Path>, mode: Option<u32>) -> std::io::Result<()> { fn hash_offset(&self) -> usize {
if let Some(mode) = mode.filter(|m| *m != 0) { self.data.len()
use std::os::unix::fs::PermissionsExt; }
let mode = std::fs::Permissions::from_mode(mode);
fs::set_permissions(path, mode).await?; fn root_hash_hex(&self) -> String {
hex::encode(self.root_hash)
} }
Ok(())
} }
async fn apply_groups(groups: &[dkl::Group], root: &str) { async fn apply_groups(groups: &[dkl::Group], root: &str) {

View File

@@ -8,9 +8,9 @@ use tokio::sync::Mutex;
use super::{retry_or_ignore, USED_DEVS}; use super::{retry_or_ignore, USED_DEVS};
use crate::blockdev::{is_uninitialized, uninitialize}; use crate::blockdev::{is_uninitialized, uninitialize};
use crate::bootstrap::config::{CryptDev, DevFilter};
use crate::fs::walk_dir; use crate::fs::walk_dir;
use crate::input; use crate::input;
use dkl::bootstrap::{CryptDev, DevFilter};
pub async fn setup(devs: &[CryptDev]) { pub async fn setup(devs: &[CryptDev]) {
if devs.is_empty() { if devs.is_empty() {
@@ -29,7 +29,7 @@ pub async fn setup(devs: &[CryptDev]) {
let all_devs = walk_dir("/dev").await; let all_devs = walk_dir("/dev").await;
for dev in devs { for dev in devs {
let mut mappings = find_dev(dev, &all_devs); let mut mappings = find_dev(dev, &all_devs).await?;
mappings.retain(|(_, dev_path)| !used_devs.contains(dev_path)); mappings.retain(|(_, dev_path)| !used_devs.contains(dev_path));
if mappings.is_empty() && !dev.optional() && !done.contains(&dev.name) { if mappings.is_empty() && !dev.optional() && !done.contains(&dev.name) {
@@ -56,27 +56,72 @@ pub async fn setup(devs: &[CryptDev]) {
.await; .await;
} }
static PREV_PW: Mutex<String> = Mutex::const_new(String::new()); struct PrevPw {
pw: String,
async fn crypt_open(crypt_dev: &str, dev_path: &str) -> Result<()> { reuse: bool,
'open_loop: loop {
let mut prev_pw = PREV_PW.lock().await;
let prompt = if prev_pw.is_empty() {
format!("crypt password for {crypt_dev}? ")
} else {
format!("crypt password for {crypt_dev} (enter = reuse previous)? ")
};
let mut pw = input::read_password(prompt).await;
if pw.is_empty() {
pw = prev_pw.clone();
} }
impl PrevPw {
fn is_set(&self) -> bool {
!self.pw.is_empty()
}
fn can_reuse(&self) -> bool {
self.reuse && self.is_set()
}
fn invalidate(&mut self) {
self.pw = String::new();
self.reuse = false;
}
async fn input(&mut self, prompt: impl std::fmt::Display) -> String {
if self.can_reuse() {
info!("reusing password");
self.pw.clone()
} else if self.is_set() {
let pw =
input::read_password(format!("{prompt} (\"\" reuse, \"*\" auto-reuse)? ")).await;
match pw.as_str() {
"" => self.pw.clone(),
"*" => {
self.reuse = true;
self.pw.clone()
}
_ => {
self.pw = pw.clone();
pw
}
}
} else {
let pw = loop {
let pw = input::read_password(format!("{prompt}? ")).await;
if pw.is_empty() { if pw.is_empty() {
error!("empty password provided!"); error!("empty password provided!");
continue; continue;
} }
break pw;
};
*prev_pw = pw.clone(); self.pw = pw.clone();
pw
}
}
}
static PREV_PW: Mutex<PrevPw> = Mutex::const_new(PrevPw {
pw: String::new(),
reuse: false,
});
async fn crypt_open(crypt_dev: &str, dev_path: &str) -> Result<()> {
'open_loop: loop {
let mut prev_pw = PREV_PW.lock().await;
let pw = prev_pw
.input(format!("crypt password for {crypt_dev}"))
.await;
if cryptsetup(&pw, ["open", dev_path, crypt_dev]).await? { if cryptsetup(&pw, ["open", dev_path, crypt_dev]).await? {
return Ok(()); return Ok(());
@@ -107,10 +152,13 @@ async fn crypt_open(crypt_dev: &str, dev_path: &str) -> Result<()> {
} }
_ => unreachable!(), _ => unreachable!(),
} }
} else { }
// device looks initialized, don't allow format // device looks initialized, don't allow format
warn!("{dev_path} looks initialized, formatting not allowed from init"); warn!("{dev_path} looks initialized, formatting not allowed from init");
prev_pw.invalidate();
match input::read_choice(["[r]etry", "[i]gnore"]).await { match input::read_choice(["[r]etry", "[i]gnore"]).await {
'r' => continue 'open_loop, 'r' => continue 'open_loop,
'i' => return Ok(()), 'i' => return Ok(()),
@@ -118,7 +166,6 @@ async fn crypt_open(crypt_dev: &str, dev_path: &str) -> Result<()> {
} }
} }
} }
}
async fn cryptsetup<const N: usize>(pw: &str, args: [&str; N]) -> Result<bool> { async fn cryptsetup<const N: usize>(pw: &str, args: [&str; N]) -> Result<bool> {
let mut child = Command::new("cryptsetup") let mut child = Command::new("cryptsetup")
@@ -134,18 +181,39 @@ async fn cryptsetup<const N: usize>(pw: &str, args: [&str; N]) -> Result<bool> {
Ok(child.wait().await?.success()) Ok(child.wait().await?.success())
} }
fn find_dev(dev: &CryptDev, all_devs: &[String]) -> Vec<(String, String)> { async fn find_dev(dev: &CryptDev, all_devs: &[String]) -> Result<Vec<(String, String)>> {
let dev_name = &dev.name; let dev_name = &dev.name;
match dev.filter { Ok(match dev.filter() {
DevFilter::Dev(ref path) => (all_devs.iter()) DevFilter::None => vec![],
DevFilter::Dev(path) => (all_devs.iter())
.filter(|dev_path| dev_path == &path) .filter(|dev_path| dev_path == &path)
.map(|dev_path| (dev.name.clone(), dev_path.clone())) .map(|dev_path| (dev.name.clone(), dev_path.clone()))
.collect(), .collect(),
DevFilter::Prefix(ref prefix) => (all_devs.iter()) DevFilter::Prefix(prefix) => (all_devs.iter())
.filter_map(|path| { .filter_map(|path| {
let suffix = path.strip_prefix(prefix)?; let suffix = path.strip_prefix(prefix)?;
Some((format!("{dev_name}{suffix}"), path.clone())) Some((format!("{dev_name}{suffix}"), path.clone()))
}) })
.collect(), .collect(),
DevFilter::Udev(filter) => {
use crate::udev;
let devs = udev::all().await?;
let filter: udev::Filter = filter.clone().into();
(devs.iter())
.filter(|dev| dev.subsystem() == Some("block") && filter.matches(dev))
.filter_map(|dev| {
let path = dev.property("DEVNAME")?.to_string();
let mut name = dev_name.replace("${name}", dev.name()?);
for (p, v) in dev.properties() {
name = name.replace(&format!("${{{p}}}"), v);
} }
Some((name, path))
})
.collect()
}
})
} }

View File

@@ -3,9 +3,9 @@ use log::{error, info, warn};
use tokio::process::Command; use tokio::process::Command;
use super::{exec, retry, retry_or_ignore, USED_DEVS}; use super::{exec, retry, retry_or_ignore, USED_DEVS};
use crate::bootstrap::config::{Config, Filesystem, LvSize, LvmLV, LvmVG, TAKE_ALL};
use crate::fs::walk_dir; use crate::fs::walk_dir;
use crate::{blockdev, lvm}; use crate::{blockdev, lvm};
use dkl::bootstrap::{Config, Filesystem, LvSize, LvmLV, LvmPV, LvmVG, TAKE_ALL};
pub async fn setup(cfg: &Config) { pub async fn setup(cfg: &Config) {
if cfg.lvm.is_empty() { if cfg.lvm.is_empty() {
@@ -73,24 +73,12 @@ async fn setup_vg(vg: &LvmVG) -> Result<()> {
info!("setting up LVM VG {vg_name} ({dev_done}/{dev_needed} devices configured)"); info!("setting up LVM VG {vg_name} ({dev_done}/{dev_needed} devices configured)");
} }
let regexps: Vec<regex::Regex> = (vg.pvs.regexps.iter()) let matching_devs = find_devs(&vg.pvs).await?;
.filter_map(|re_str| {
(re_str.parse())
.inspect_err(|e| error!("invalid regex ignored: {re_str:?}: {e}"))
.ok()
})
.collect();
let mut used_devs = USED_DEVS.lock().await;
let matching_devs = (walk_dir("/dev").await.into_iter())
.filter(|path| !used_devs.contains(path.as_str()))
.filter(|path| regexps.iter().any(|re| re.is_match(path)));
let devs: Vec<_> = if dev_needed == TAKE_ALL { let devs: Vec<_> = if dev_needed == TAKE_ALL {
matching_devs.collect() matching_devs
} else { } else {
matching_devs.take(missing_count!()).collect() matching_devs.into_iter().take(missing_count!()).collect()
}; };
let cmd = if dev_done == 0 { let cmd = if dev_done == 0 {
@@ -109,7 +97,7 @@ async fn setup_vg(vg: &LvmVG) -> Result<()> {
} }
dev_done += devs.len(); dev_done += devs.len();
used_devs.extend(devs); USED_DEVS.lock().await.extend(devs);
if dev_needed != TAKE_ALL && dev_done < (dev_needed as usize) { if dev_needed != TAKE_ALL && dev_done < (dev_needed as usize) {
return Err(format_err!( return Err(format_err!(
@@ -213,3 +201,33 @@ async fn install_package(pkg: &str) -> Result<()> {
Err(format_err!("failed to install package {pkg}: {status}")) Err(format_err!("failed to install package {pkg}: {status}"))
} }
} }
/// List the block devices eligible as LVM PVs for this config, excluding
/// any device already claimed by another bootstrap step.
async fn find_devs(pvs: &LvmPV) -> Result<Vec<String>> {
    let mut candidates: Vec<String>;

    if let Some(ref filter) = pvs.udev {
        // udev-based selection: property filter over block-subsystem devices.
        use crate::udev;
        let filter: udev::Filter = filter.clone().into();
        let devs = udev::all().await?;
        candidates = devs
            .iter()
            .filter(|dev| dev.subsystem() == Some("block") && filter.matches(dev))
            .filter_map(|dev| dev.property("DEVNAME").map(|s| s.to_string()))
            .collect();
    } else if !pvs.regexps.is_empty() {
        // Path-based selection: a /dev entry matching any regex is a candidate.
        // Invalid regexes are reported and skipped rather than aborting.
        let regexps: Vec<regex::Regex> = (pvs.regexps.iter())
            .filter_map(|re_str| {
                (re_str.parse())
                    .inspect_err(|e| error!("invalid regex ignored: {re_str:?}: {e}"))
                    .ok()
            })
            .collect();
        candidates = (walk_dir("/dev").await.into_iter())
            .filter(|path| regexps.iter().any(|re| re.is_match(path)))
            .collect();
    } else {
        warn!("no device filters, no matches");
        candidates = vec![];
    }

    // Never hand out a device that a previous step already consumed.
    let used_devs = USED_DEVS.lock().await;
    candidates.retain(|path| !used_devs.contains(path.as_str()));
    Ok(candidates)
}

View File

@@ -3,12 +3,12 @@ use log::{info, warn};
use std::collections::BTreeSet as Set; use std::collections::BTreeSet as Set;
use tokio::process::Command; use tokio::process::Command;
use super::{format_err, retry_or_ignore, Config, Result}; use super::{format_err, retry_or_ignore, Result};
use crate::{ use crate::{
bootstrap::config,
udev, udev,
utils::{select_n_by_regex, NameAliases}, utils::{select_n_by_regex, select_n_by_udev, NameAliases},
}; };
use dkl::bootstrap::{Config, Network};
pub async fn setup(cfg: &Config) { pub async fn setup(cfg: &Config) {
if cfg.networks.is_empty() { if cfg.networks.is_empty() {
@@ -23,19 +23,16 @@ pub async fn setup(cfg: &Config) {
} }
} }
async fn setup_network(net: &config::Network, assigned: &mut Set<String>) -> Result<()> { async fn setup_network(net: &Network, assigned: &mut Set<String>) -> Result<()> {
info!("setting up network {}", net.name); info!("setting up network {}", net.name);
let netdevs = get_interfaces()? let netdevs = (get_interfaces().await?)
.filter(|dev| !assigned.contains(dev.name())) .filter(|dev| !assigned.contains(dev.name()))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
for dev in &netdevs { for dev in &netdevs {
info!( let names = [dev.name()].into_iter().chain(dev.aliases()).join(", ");
"- available network device: {}, aliases [{}]", info!("- available network device: {}", names);
dev.name(),
dev.aliases().join(", ")
);
} }
let mut cmd = Command::new("ash"); let mut cmd = Command::new("ash");
@@ -47,8 +44,19 @@ async fn setup_network(net: &config::Network, assigned: &mut Set<String>) -> Res
for iface in &net.interfaces { for iface in &net.interfaces {
let var = &iface.var; let var = &iface.var;
let if_names = if let Some(ref udev_filter) = iface.udev {
select_n_by_udev(
iface.n,
"net",
"INTERFACE",
&udev_filter.clone().into(),
&assigned,
)
.await?
} else {
let netdevs = netdevs.iter().filter(|na| !assigned.contains(na.name())); let netdevs = netdevs.iter().filter(|na| !assigned.contains(na.name()));
let if_names = select_n_by_regex(iface.n, &iface.regexps, netdevs); select_n_by_regex(iface.n, &iface.regexps, netdevs)
};
if if_names.is_empty() { if if_names.is_empty() {
return Err(format_err!("- no interface match for {var:?}")); return Err(format_err!("- no interface match for {var:?}"));
@@ -71,24 +79,20 @@ async fn setup_network(net: &config::Network, assigned: &mut Set<String>) -> Res
Ok(()) Ok(())
} }
fn get_interfaces() -> Result<impl Iterator<Item = NameAliases>> { async fn get_interfaces() -> Result<impl Iterator<Item = NameAliases>> {
Ok(udev::get_devices("net")?.into_iter().map(|dev| { let nas: Vec<_> = (udev::all().await?.of_subsystem("net"))
let mut na = NameAliases::new(dev.sysname().to_string()); .filter_map(|dev| {
let name = dev.property("INTERFACE")?;
let mut na = NameAliases::new(name.to_string());
for (property, value) in dev.properties() { for (p, v) in dev.properties() {
if [ if p.starts_with("ID_NET_NAME") {
"INTERFACE", na.push(v.to_string());
"ID_NET_NAME",
"ID_NET_NAME_PATH",
"ID_NET_NAME_MAC",
"ID_NET_NAME_SLOT",
]
.contains(&property)
{
na.push(value.to_string());
} }
} }
na Some(na)
})) })
.collect();
Ok(nas.into_iter())
} }

View File

@@ -7,7 +7,7 @@ use tokio::net;
use tokio::process::Command; use tokio::process::Command;
use super::retry_or_ignore; use super::retry_or_ignore;
use crate::bootstrap::config::{Config, SSHServer}; use dkl::bootstrap::{Config, SSHServer};
pub async fn start(cfg: &Config) { pub async fn start(cfg: &Config) {
retry_or_ignore(async || { retry_or_ignore(async || {

View File

@@ -2,6 +2,10 @@ use crate::input;
pub async fn run() { pub async fn run() {
tokio::spawn(async { tokio::spawn(async {
// give a bit of time for stdout
use tokio::time::{sleep, Duration};
sleep(Duration::from_millis(200)).await;
if let Err(e) = input::forward_requests_from_socket().await { if let Err(e) = input::forward_requests_from_socket().await {
eprintln!("failed to forwards requests from socket: {e}"); eprintln!("failed to forwards requests from socket: {e}");
std::process::exit(1); std::process::exit(1);

View File

@@ -1,61 +0,0 @@
/// Top-level system configuration: layers forming the root filesystem plus
/// the accounts, files and mounts applied on top of it.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Config {
    /// Names of the system layers combined (via an overlay) into the root.
    pub layers: Vec<String>,
    /// Root account configuration.
    pub root_user: RootUser,
    /// Extra mounts to perform.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub mounts: Vec<Mount>,
    /// Files to create in the target root.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub files: Vec<File>,
    /// Groups to ensure exist.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub groups: Vec<Group>,
    /// Users to ensure exist.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub users: Vec<User>,
}
/// Root account configuration.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct RootUser {
    /// Hashed password (presumably crypt(3) format — TODO confirm);
    /// None leaves the password unset.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub password_hash: Option<String>,
    /// SSH public keys authorized for root login.
    pub authorized_keys: Vec<String>,
}
/// A filesystem mount entry.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Mount {
    /// Filesystem type; None presumably lets mount auto-detect — TODO confirm.
    pub r#type: Option<String>,
    /// Source device (or pseudo-filesystem source).
    pub dev: String,
    /// Target mount point.
    pub path: String,
    /// Mount options, if any.
    pub options: Option<String>,
}
/// A group to create.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Group {
    pub name: String,
    /// Numeric gid; None presumably lets the system choose — TODO confirm.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gid: Option<u32>,
}
/// A user to create.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct User {
    pub name: String,
    /// Numeric uid; None presumably lets the system choose — TODO confirm.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub uid: Option<u32>,
    /// Numeric primary gid; None presumably lets the system choose — TODO confirm.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gid: Option<u32>,
}
/// A file to create in the target root.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct File {
    /// Path within the target root.
    pub path: String,
    /// Unix permission bits; not applied to symlinks, and a None or 0 value
    /// keeps the default mode.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<u32>,
    /// What kind of filesystem object to create (flattened in serialization).
    #[serde(flatten)]
    pub kind: FileKind,
}
/// The kind of filesystem object a `File` entry creates.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
pub enum FileKind {
    /// Regular file with the given contents.
    Content(String),
    /// Symlink to the given target.
    Symlink(String),
    /// Directory; `Dir(false)` entries are ignored when applied.
    Dir(bool),
}

View File

@@ -37,7 +37,7 @@ impl Log {
} }
} }
pub fn subscribe(&self) -> LogWatch { pub fn subscribe(&self) -> LogWatch<'_> {
LogWatch { LogWatch {
log: self, log: self,
pos: 0, pos: 0,

View File

@@ -3,7 +3,7 @@ use std::fmt::Display;
use std::sync::{Arc, LazyLock}; use std::sync::{Arc, LazyLock};
use tokio::io::{self, AsyncBufReadExt, AsyncWriteExt, BufReader}; use tokio::io::{self, AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::net; use tokio::net;
use tokio::sync::{oneshot, watch, Mutex}; use tokio::sync::{Mutex, oneshot, watch};
pub async fn read_line(prompt: impl Display) -> String { pub async fn read_line(prompt: impl Display) -> String {
read(prompt, false).await read(prompt, false).await

View File

@@ -1,11 +1,9 @@
pub mod bootstrap; pub mod blockdev;
pub mod cmd; pub mod cmd;
pub mod dklog;
pub mod fs;
pub mod input;
pub mod lsblk; pub mod lsblk;
pub mod lvm; pub mod lvm;
pub mod udev; pub mod udev;
pub mod utils; pub mod utils;
pub mod input;
pub mod blockdev;
pub mod fs;
pub mod dkl;
pub mod dklog;

View File

@@ -1,4 +1,4 @@
use eyre::{format_err, Result}; use eyre::{Result, format_err};
use tokio::process::Command; use tokio::process::Command;
#[derive(Debug, serde::Deserialize, serde::Serialize)] #[derive(Debug, serde::Deserialize, serde::Serialize)]

View File

@@ -63,3 +63,148 @@ pub fn get_devices(class: &str) -> Result<Vec<Device>> {
Ok(devices) Ok(devices)
} }
/// Dump the whole udev device database (`udevadm info -e`) and return the
/// raw text for parsing.
///
/// Errors if `udevadm` cannot be spawned or exits non-zero.
pub async fn all() -> Result<Devs> {
    let output = tokio::process::Command::new("udevadm")
        .args(["info", "-e"])
        .stderr(std::process::Stdio::inherit())
        .output()
        .await?;
    if !output.status.success() {
        return Err(eyre::format_err!("udevadm failed: {}", output.status));
    }
    // udevadm output is expected to be UTF-8; use a lossy conversion rather
    // than from_utf8_unchecked, which is undefined behavior on invalid bytes
    // (e.g. oddly-named device nodes or symlinks).
    Ok(Devs {
        data: String::from_utf8_lossy(&output.stdout).into_owned(),
    })
}
/// Query udev for a single device (`udevadm info <path>`) and return the
/// raw text for parsing.
///
/// Errors if `udevadm` cannot be spawned or exits non-zero.
pub async fn by_path(path: &str) -> Result<Devs> {
    let output = tokio::process::Command::new("udevadm")
        .args(["info", path])
        .stderr(std::process::Stdio::inherit())
        .output()
        .await?;
    if !output.status.success() {
        return Err(eyre::format_err!("udevadm failed: {}", output.status));
    }
    // Lossy conversion: from_utf8_unchecked is undefined behavior if udevadm
    // ever emits non-UTF-8 bytes.
    Ok(Devs {
        data: String::from_utf8_lossy(&output.stdout).into_owned(),
    })
}
/// Raw dump of one or more udev device records, as produced by `udevadm info`.
pub struct Devs {
    data: String,
}

impl<'t> Devs {
    /// Iterate over the individual device records (blank-line separated).
    pub fn iter(&'t self) -> impl Iterator<Item = Dev<'t>> {
        let records = self.data.split("\n\n");
        records.filter(|record| !record.is_empty()).map(Dev)
    }

    /// Iterate over the records whose SUBSYSTEM property equals `subsystem`.
    pub fn of_subsystem(&'t self, subsystem: &str) -> impl Iterator<Item = Dev<'t>> {
        self.iter()
            .filter(move |record| record.subsystem() == Some(subsystem))
    }
}

/// One udev device record: a group of prefixed lines from `udevadm info`.
pub struct Dev<'t>(&'t str);

impl<'t> Dev<'t> {
    /// The unparsed record text.
    pub fn raw(&self) -> &str {
        self.0
    }

    // alpine's udev prefixes we've seen:
    // - P: Device path in /sys/
    // - N: Kernel device node name
    // - S: Device node symlink
    // - L: Device node symlink priority [ignored]
    // - E: Device property
    fn by_prefix(&self, prefix: &'static str) -> impl Iterator<Item = &str> {
        let lines = self.0.lines();
        lines.filter_map(move |line| line.strip_prefix(prefix))
    }

    /// Device path in /sys/
    pub fn path(&self) -> Option<&str> {
        self.by_prefix("P: ").next()
    }

    /// Kernel device node name
    pub fn name(&self) -> Option<&str> {
        self.by_prefix("N: ").next()
    }

    /// Device node symlinks
    pub fn symlinks(&self) -> Vec<&str> {
        self.by_prefix("S: ").collect()
    }

    /// Device properties (`E:` lines, split on the first `=`).
    pub fn properties(&self) -> impl Iterator<Item = (&str, &str)> {
        self.by_prefix("E: ").filter_map(|line| line.split_once("="))
    }

    /// Value of the property named `name`, if present.
    pub fn property(&self, name: &str) -> Option<&str> {
        self.properties()
            .find_map(|(key, value)| (key == name).then_some(value))
    }

    /// Device subsystem (the SUBSYSTEM property).
    pub fn subsystem(&self) -> Option<&str> {
        self.property("SUBSYSTEM")
    }
}
/// A compiled udev property filter (built from `dkl::bootstrap::UdevFilter`).
pub enum Filter {
    /// Property exists, regardless of value.
    Has(String),
    /// Property equals the given value exactly.
    Eq(String, String),
    /// Property value matches the pre-compiled glob pattern.
    Glob(String, glob::Pattern),
    /// All sub-filters match.
    And(Vec<Filter>),
    /// At least one sub-filter matches.
    Or(Vec<Filter>),
    /// The sub-filter does not match.
    Not(Box<Filter>),
    /// Never matches (used when a glob pattern fails to compile).
    False,
}
impl Filter {
    /// Whether `dev`'s properties satisfy this filter.
    ///
    /// `And([])` matches everything, `Or([])` matches nothing
    /// (`all`/`any` on an empty iterator).
    // Note: the previous `impl<'t>` declared a lifetime parameter that was
    // never used; it is dropped here.
    pub fn matches(&self, dev: &Dev) -> bool {
        match self {
            Self::False => false,
            Self::Has(k) => dev.property(k).is_some(),
            Self::Eq(k, v) => dev.properties().any(|kv| kv == (k, v)),
            Self::Glob(k, pattern) => dev
                .properties()
                .any(|(pk, pv)| pk == k && pattern.matches(pv)),
            Self::And(ops) => ops.iter().all(|op| op.matches(dev)),
            Self::Or(ops) => ops.iter().any(|op| op.matches(dev)),
            Self::Not(op) => !op.matches(dev),
        }
    }
}
// Conversion from the (de)serializable config filter to the matchable form.
// Implemented as `From` (rather than `Into`, which clippy flags as
// `from_over_into`); `.into()` still works at call sites through the blanket
// impl. The previous unused `<'t>` lifetime parameter is dropped.
impl From<dkl::bootstrap::UdevFilter> for Filter {
    fn from(filter: dkl::bootstrap::UdevFilter) -> Filter {
        use dkl::bootstrap::UdevFilter as U;
        match filter {
            U::Has(p) => Filter::Has(p),
            U::Eq(p, v) => Filter::Eq(p, v),
            U::Glob(p, pattern) => match glob::Pattern::new(&pattern) {
                Ok(pattern) => Filter::Glob(p, pattern),
                // An invalid glob is reported once and degraded to a filter
                // that never matches, rather than failing the whole config.
                Err(e) => {
                    warn!("pattern {pattern:?} will never match: {e}");
                    Filter::False
                }
            },
            U::And(ops) => Filter::And(ops.into_iter().map(Filter::from).collect()),
            U::Or(ops) => Filter::Or(ops.into_iter().map(Filter::from).collect()),
            U::Not(op) => Filter::Not(Box::new((*op).into())),
        }
    }
}

View File

@@ -2,6 +2,8 @@ use log::error;
use std::collections::BTreeSet as Set; use std::collections::BTreeSet as Set;
use std::sync::LazyLock; use std::sync::LazyLock;
use crate::udev;
static CMDLINE: LazyLock<String> = LazyLock::new(|| { static CMDLINE: LazyLock<String> = LazyLock::new(|| {
std::fs::read("/proc/cmdline") std::fs::read("/proc/cmdline")
.inspect_err(|e| error!("failed to read kernel cmdline: {e}")) .inspect_err(|e| error!("failed to read kernel cmdline: {e}"))
@@ -88,3 +90,24 @@ pub fn select_n_by_regex<'t>(
nas.take(n as usize).collect() nas.take(n as usize).collect()
} }
} }
/// Select up to `n` devices of `subsystem` matching `filter`, returning the
/// value of `result_property` for each; devices without that property, and
/// names already present in `in_use`, are skipped. `n == -1` takes all
/// matches.
///
/// Errors if the udev database cannot be read.
// Note: the previous signature declared an unused `<'t>` lifetime parameter;
// it is dropped here (no caller-visible change).
pub async fn select_n_by_udev(
    n: i16,
    subsystem: &str,
    result_property: &str,
    filter: &udev::Filter,
    in_use: &Set<String>,
) -> eyre::Result<Vec<String>> {
    let devs = udev::all().await?;
    let names = devs
        .of_subsystem(subsystem)
        .filter(|dev| filter.matches(dev))
        .filter_map(|dev| Some(dev.property(result_property)?.to_string()))
        .filter(|name| !in_use.contains(name));
    Ok(if n == -1 {
        names.collect()
    } else {
        // NOTE(review): other negative values wrap through `as usize` and
        // effectively take everything — same convention as select_n_by_regex.
        names.take(n as usize).collect()
    })
}

View File

@@ -21,15 +21,13 @@ auths:
sshKey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICkpbU6sf4t0f6XAv9DuW3XH5iLM0AI5rc8PT2jwea1N sshKey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICkpbU6sf4t0f6XAv9DuW3XH5iLM0AI5rc8PT2jwea1N
password: bXlzZWVk:HMSxrg1cYphaPuUYUbtbl/htep/tVYYIQAuvkNMVpw0 # mypass password: bXlzZWVk:HMSxrg1cYphaPuUYUbtbl/htep/tVYYIQAuvkNMVpw0 # mypass
signer_public_key: MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQA29glSqk7MqoUIjD+UQG+b4v59pTFkn8rYtNhOftTe7uiLUvGFsjNdzP3tW64t/c6YD2p6dtI3oQXGOVQO1vIWPEBc6Sq++BRpQ0FVna+dgNQx8/kLXN9Na0ZYbK7q0haCI7/EHWOX79JFFxJE9HJ67AOMmXwGJ2jrfa1CUnWvfCmT+E=
ssh: ssh:
listen: "[::]:22" listen: "[::]:22"
user_ca: /user_ca.pub user_ca: /user_ca.pub
networks: networks:
- name: loopback - name: loopback
interfaces: [ { var: iface, n: 1, regexps: [ "^lo$" ] } ] interfaces: [ { var: iface, n: 1, udev: !eq [INTERFACE, lo] } ]
script: | script: |
ip a add 127.0.0.1/8 dev lo ip a add 127.0.0.1/8 dev lo
ip a add ::1/128 dev lo ip a add ::1/128 dev lo
@@ -38,31 +36,27 @@ networks:
interfaces: interfaces:
- var: iface - var: iface
n: 1 n: 1
regexps: udev: !has ID_NET_NAME_MAC
- eth.*
- veth.*
- eno.*
- enp.*
script: | script: |
ip li set $iface up ip li set $iface up
udhcpc -i $iface -b -t1 -T1 -A5 || ip a add 192.168.12.42/24 dev $iface
ip a add 2001:41d0:306:168f::1337:2eed/64 dev $iface ip a add fd12:6e76:7474::1337:2eed/64 dev $iface
ip route add default via 192.168.12.254
ip route add default via fd12:6e76:7474::1 dev $iface
pre_lvm_crypt: #pre_lvm_crypt:
- dev: /dev/vda #- name: sys-${name}
name: sys0 # udev: !glob [ DEVNAME, /dev/vd* ]
- dev: /dev/vdb
name: sys1
lvm: lvm:
- vg: storage - vg: storage
pvs: pvs:
n: 2 n: 2
regexps: regexps:
- /dev/mapper/sys[01] #- ^/dev/mapper/sys-
# to match full disks # to match full disks
#- /dev/nvme[0-9]+n[0-9]+ #- /dev/nvme[0-9]+n[0-9]+
#- /dev/vd[a-z]+ - /dev/vd[a-z]+
#- /dev/sd[a-z]+ #- /dev/sd[a-z]+
#- /dev/hd[a-z]+ #- /dev/hd[a-z]+
# to match partitions: # to match partitions:
@@ -78,11 +72,16 @@ lvm:
lvs: lvs:
- name: bootstrap - name: bootstrap
size: 2g size: 1g
- name: varlog - name: varlog
extents: 10%FREE size: 256m
# size: 10g - name: kubelet
size: 256m
- name: containerd
size: 1g
- name: etcd
size: 256m
- name: podman - name: podman
extents: 10%FREE extents: 10%FREE
@@ -96,11 +95,8 @@ lvm:
#- dev: /dev/storage/bootstrap #- dev: /dev/storage/bootstrap
#- dev: /dev/storage/dls #- dev: /dev/storage/dls
signer_public_key: 'MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBe6Y3zGQUIHvVXoS5GI8irY8yoB0ozFpzn/cUykA46TkHdJ8xCEaaM1MpqMrfWgDtP/rA2KeE9HjVerLnEFD01uUAUh4/OYgCBDYJPhridVDoC78KOJpkWBj7Shl0Rp0AtETvatNPa1RRe15V7nDF/Nm75Y6O3IL29lYPQ6jqEGhR810='
bootstrap: bootstrap:
#dev: /dev/mapper/bootstrap
dev: /dev/storage/bootstrap dev: /dev/storage/bootstrap
# TODO seed: https://direktil.novit.io/bootstraps/dls-crypt seed: http://192.168.12.254:7606/public/download-set/host/m1/bootstrap.tar?set=ICIXKJJWA6U4RQESD3KQMWO3IBW6THG4FJUM2HUNFPTIODVSXGDPXTCHSFT6IOUZO6LBAG65QIGYUMIZA3TEHTPB6BXKUFONNUWKUWAJAQRE2GDEOC4RWAAAQA3DSZJXMNSDGN34NA5G2MJ2MJXW65DTORZGC4BOORQXEAAAAAACMICVFM
seed: http://192.168.10.254:7606/hosts/m1/bootstrap.tar
# TODO seed_sign_key: "..."
# TODO load_and_close: true

Binary file not shown.

Binary file not shown.