add dynlay

This commit is contained in:
Mikaël Cluseau
2025-07-21 01:41:03 +02:00
parent 52c23653ac
commit 7d02d8f932
7 changed files with 731 additions and 24 deletions

View File

@ -40,10 +40,31 @@ enum Command {
/// Path where the logs are stored
#[arg(long, short = 'p', default_value = "/var/log", env = "DKL_LOG_PATH")]
log_path: String,
/// Name of the log set to operate on.
log_name: String,
#[command(subcommand)]
op: LogOp,
},
/// Install a dynamic layer (squashfs image) and link it into the system.
Dynlay {
/// Name of the layer to install.
layer: String,
/// Version of the layer to install.
version: String,
/// Base URL to fetch layers from (layer is fetched at {url}/{layer}/{version}).
#[arg(
long,
short = 'u',
default_value = "https://dkl.novit.io/dist/layers",
env = "DKL_DYNLAY_URL"
)]
url_prefix: String,
/// Local directory where layer images are stored and mounted.
#[arg(
long,
short = 'd',
default_value = "/opt/dynlay",
env = "DKL_DYNLAY_DIR"
)]
layers_dir: String,
/// Root under which the layer's files are symlinked.
#[arg(long, default_value = "/")]
chroot: std::path::PathBuf,
},
}
#[tokio::main(flavor = "current_thread")]
@ -90,6 +111,21 @@ async fn main() -> Result<()> {
log_name,
op,
} => op.run(&log_path, &log_name).await,
C::Dynlay {
ref layer,
ref version,
ref url_prefix,
ref layers_dir,
chroot,
} => {
dkl::dynlay::Dynlay {
url_prefix,
layers_dir,
chroot,
}
.install(layer, version)
.await
}
}
}

165
src/dynlay.rs Normal file
View File

@ -0,0 +1,165 @@
use eyre::{format_err, Result};
use log::{debug, error, info, warn};
use std::path::PathBuf;
use tokio::{fs, io::AsyncWriteExt, process::Command};
use crate::fs::spawn_walk_dir;
/// Configuration for installing a dynamic layer: where to fetch it from,
/// where to store and mount it, and the root under which symlinks are made.
pub struct Dynlay<'t> {
/// Base URL prefix; a layer is fetched from `{url_prefix}/{layer}/{version}`.
pub url_prefix: &'t str,
/// Directory where downloaded layer images are kept and mounted.
pub layers_dir: &'t str,
/// Filesystem root in which the layer's contents are symlinked.
pub chroot: PathBuf,
}
impl<'t> Dynlay<'t> {
/// Download (if not already cached) and mount `layer` at `version`, then
/// mirror its contents into `self.chroot` as symlinks. Any previous mount
/// of the same layer is cleared first.
pub async fn install(&self, layer: &str, version: &str) -> Result<()> {
// Per-layer directory holding the downloaded squashfs images.
let lay_dir = &format!("{base}/{layer}", base = self.layers_dir);
debug!("mkdir -p {lay_dir}");
fs::create_dir_all(lay_dir).await?;
let lay_path = &format!("{lay_dir}/{version}");
if !fs::try_exists(lay_path).await? {
// Download to a .tmp file first, then rename into place so a partial
// download is never mistaken for a complete image.
let part_file = &format!("{lay_dir}/{version}.tmp");
self.fetch(layer, version, part_file).await?;
(fs::rename(part_file, lay_path).await)
.map_err(|e| format_err!("failed mv {part_file} {lay_path}: {e}"))?;
}
// Mount point for the squashfs image.
let mount_path = PathBuf::from(lay_dir).join("mounts").join(layer);
let mount_path_str = mount_path.to_string_lossy().into_owned();
(fs::create_dir_all(&mount_path).await)
.map_err(|e| format_err!("mkdir -p {mount_path:?} failed: {e}"))?;
// Canonicalize so the path can be matched against mountinfo targets.
let mount_path = &fs::canonicalize(mount_path).await?;
let mut mount_info = rsmount::tables::MountInfo::new()?;
mount_info.import_mountinfo()?;
if mount_info.find_target(mount_path).is_some() {
// A previous version is mounted: remove the chroot entries that were
// created from it (files only; shared directories are left in place)
// before unmounting.
info!("clearing previous mount");
let mut paths = spawn_walk_dir(mount_path.clone());
while let Some(result) = paths.recv().await {
let Ok((path, md)) = result else {
continue;
};
if !md.is_dir() {
// `path` is relative to the mount; map it into the chroot.
let path = self.chroot.join(&path);
debug!("rm {path:?}");
if let Err(e) = fs::remove_file(&path).await {
warn!("rm {path:?} failed: {e}");
}
}
}
sudo("umount", &[mount_path]).await?;
}
// mount layer
info!("mounting layer");
sudo("mount", &["-t", "squashfs", lay_path, &mount_path_str]).await?;
// Mirror the mounted tree into the chroot: directories are created,
// everything else becomes a symlink pointing back into the mount.
let mut paths = spawn_walk_dir(mount_path.clone());
while let Some(result) = paths.recv().await {
let Ok((path, md)) = result else {
continue;
};
let target = self.chroot.join(&path);
if md.is_dir() {
debug!("mkdir -p {target:?}");
if let Err(e) = fs::create_dir_all(&target).await {
error!("mkdir -p {target:?} failed: {e}");
}
} else {
// Replace whatever was there; per-file failures are logged, not fatal.
let _ = fs::remove_file(&target).await;
let source = mount_path.join(&path);
debug!("ln -s {source:?} {target:?}");
if let Err(e) = fs::symlink(&source, &target).await {
error!("ln -s {source:?} {target:?} failed: {e}");
}
}
}
Ok(())
}
/// Download `{url_prefix}/{layer}/{version}` into `part_file`, verifying the
/// SHA-1 the server announces in the `x-content-sha1` response header.
/// On hash mismatch the partial file is removed and an error is returned.
async fn fetch(&self, layer: &str, version: &str, part_file: &str) -> Result<()> {
let url = &format!("{}/{layer}/{version}", self.url_prefix);
info!("fetching {url}");
let mut out = (fs::File::create(part_file).await)
.map_err(|e| format_err!("failed to open {part_file}: {e}"))?;
let resp = reqwest::get(url).await?;
if !resp.status().is_success() {
return Err(format_err!("fetch failed: {}", resp.status()));
}
// Expected content hash, hex-encoded in a response header.
let sha1 = (resp.headers().get("x-content-sha1"))
.ok_or(format_err!("no content hash in response"))?;
let sha1 = (sha1.to_str()).map_err(|e| format_err!("invalid sha1: {e}"))?;
debug!("content sha1: {sha1}");
let mut exp_sha1 = [0; 20];
hex::decode_to_slice(sha1, &mut exp_sha1).map_err(|e| format_err!("invalid sha1: {e}"))?;
let mut hash = openssl::sha::Sha1::new();
use futures::StreamExt;
// Stream the body to disk while hashing it, so the whole image is never
// held in memory.
let mut stream = resp.bytes_stream();
while let Some(bytes) = stream.next().await {
let bytes = bytes.map_err(|e| format_err!("remote read error: {e}"))?;
hash.update(&bytes);
(out.write_all(&bytes).await).map_err(|e| format_err!("local write error: {e}"))?;
}
(out.flush().await).map_err(|e| format_err!("local write error: {e}"))?;
drop(out);
let dl_sha1 = hash.finish();
if dl_sha1 != exp_sha1 {
// Don't leave a corrupt partial download behind.
if let Err(e) = fs::remove_file(part_file).await {
error!("failed to remove {part_file}: {e}");
}
return Err(format_err!(
"invalid content hash: expected {exp}, got {got}",
exp = hex::encode(exp_sha1),
got = hex::encode(dl_sha1)
));
}
Ok(())
}
}
/// Run `program` with `args`, prefixing the invocation with `sudo` when the
/// current effective user is not root. Returns an error when the command
/// cannot be spawned or exits unsuccessfully.
async fn sudo<I, S>(program: &str, args: I) -> Result<()>
where
    I: IntoIterator<Item = S>,
    S: AsRef<std::ffi::OsStr>,
{
    let is_root = nix::unistd::geteuid().is_root();
    // Root runs the program directly; everyone else goes through sudo.
    let mut cmd = if is_root {
        Command::new(program)
    } else {
        Command::new("sudo")
    };
    if !is_root {
        cmd.arg(program);
    }
    cmd.args(args);
    let status = cmd.status().await?;
    if !status.success() {
        return Err(format_err!("{program} failed: {status}"));
    }
    Ok(())
}

60
src/fs.rs Normal file
View File

@ -0,0 +1,60 @@
use eyre::Result;
use std::fs::Metadata;
use std::path::PathBuf;
use tokio::fs::read_dir;
use tokio::sync::mpsc;
/// Walk `dir` in a background task, streaming each entry's relative path and
/// metadata through the returned channel.
pub fn spawn_walk_dir(
    dir: impl Into<PathBuf> + Send + 'static,
) -> mpsc::Receiver<Result<(PathBuf, Metadata)>> {
    let (sender, receiver) = mpsc::channel(1);
    tokio::spawn(walk_dir(dir, sender));
    receiver
}
pub async fn walk_dir(dir: impl Into<PathBuf>, tx: mpsc::Sender<Result<(PathBuf, Metadata)>>) {
let dir: PathBuf = dir.into();
let mut todo = std::collections::LinkedList::new();
if let Ok(rd) = read_dir(&dir).await {
todo.push_front(rd);
}
while let Some(rd) = todo.front_mut() {
let entry = match rd.next_entry().await {
Ok(v) => v,
Err(e) => {
if tx.send(Err(e.into())).await.is_err() {
return;
}
todo.pop_front(); // skip dir on error
continue;
}
};
let Some(entry) = entry else {
todo.pop_front();
continue;
};
let Ok(md) = entry.metadata().await else {
continue;
};
let is_dir = md.is_dir();
let Ok(path) = entry.path().strip_prefix(&dir).map(|p| p.to_path_buf()) else {
continue; // sub-entry not in dir, weird but semantically, we ignore
};
if tx.send(Ok((path, md))).await.is_err() {
return;
}
// recurse in sub directories
if is_dir {
if let Ok(rd) = read_dir(entry.path()).await {
todo.push_front(rd);
}
}
}
}

View File

@ -2,6 +2,8 @@ pub mod apply;
pub mod bootstrap;
pub mod dls;
pub mod logger;
pub mod dynlay;
pub mod fs;
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Config {