daemon w/ tests PoC

This commit is contained in:
Qyriad 2026-03-22 17:15:04 +01:00
parent 68fc04a6d2
commit 7f4a5a35ca
13 changed files with 498 additions and 81 deletions

10
Cargo.lock generated
View file

@ -223,6 +223,7 @@ dependencies = [
"tracing",
"tracing-human-layer",
"tracing-subscriber",
"which",
]
[[package]]
@ -851,6 +852,15 @@ version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "which"
version = "8.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81995fafaaaf6ae47a7d0cc83c67caf92aeb7e5331650ae6ff856f7c0c60c459"
dependencies = [
"libc",
]
[[package]]
name = "windows"
version = "0.61.3"

View file

@ -47,6 +47,7 @@ tap = "1.0.1"
tracing = { version = "0.1.44", features = ["attributes"] }
tracing-human-layer = "0.2.1"
tracing-subscriber = { version = "0.3.22", default-features = false, features = ["std", "env-filter", "fmt", "ansi", "registry", "parking_lot"] }
which = "8.0.2"
[profile.dev]
opt-level = 1
@ -63,3 +64,21 @@ opt-level = 1
[profile.release.package."*"]
debug = true
debug-assertions = true
[lints.clippy]
#arithmetic_side_effects = "warn"
as_ptr_cast_mut = "warn"
assigning_clones = "warn"
borrow_as_ptr = "warn"
#cargo_common_metadata = "warn"
cast_lossless = "warn"
#cast_possible_truncation = "warn"
cast_possible_wrap = "warn"
cast_ptr_alignment = "warn"
cast_sign_loss = "warn"
clear_with_drain = "warn"
coerce_container_to_any = "warn"
derive_partial_eq_without_eq = "warn"
doc_broken_link = "warn"
doc_comment_double_space_linebreaks = "warn"
doc_markdown = "warn"

View file

@ -9,18 +9,28 @@
in import src { inherit pkgs; },
}: let
inherit (qpkgs) lib;
dynix = (qpkgs.callPackage ./package.nix { })
# Use LLD for faster link times.
defaultStdenv = pkgs.clangStdenv.override {
cc = pkgs.clangStdenv.cc.override {
bintools = pkgs.wrapBintoolsWith { inherit (pkgs.llvmPackages) bintools; };
};
};
dynix = (qpkgs.callPackage ./package.nix { clangStdenv = defaultStdenv; })
.overrideAttrs (final: prev: {
dynixCommand = qpkgs.stdlib.mkStdenvPretty prev.dynixCommand;
dynixModules = qpkgs.stdlib.mkStdenvPretty prev.dynixModules;
})
|> qpkgs.stdlib.mkStdenvPretty;
byStdenv = lib.mapAttrs (stdenvName: stdenv: let
withStdenv = dynix.override { clangStdenv = stdenv; };
dynix' = withStdenv.overrideAttrs (prev: {
pname = "${prev.pname}-${stdenvName}";
});
in dynix') qpkgs.validStdenvs;
in dynix.overrideAttrs (prev: lib.recursiveUpdate prev {
passthru = { inherit byStdenv; };
})

View file

@ -7,23 +7,14 @@
clangStdenv,
callPackage,
linkFarm,
llvmPackages,
rustHooks,
rustPackages,
versionCheckHook,
wrapBintoolsWith,
}: lib.callWith' rustPackages ({
rustPlatform,
cargo,
}: let
# Use LLD for faster link times.
stdenv = clangStdenv.override {
cc = clangStdenv.cc.override {
bintools = wrapBintoolsWith {
bintools = llvmPackages.bintools;
};
};
};
stdenv = clangStdenv;
cargoToml = lib.importTOML ./Cargo.toml;
cargoPackage = cargoToml.package;
in stdenv.mkDerivation (finalAttrs: let

View file

@ -4,3 +4,13 @@
match_block_trailing_comma = true
merge_derives = false
# Unstable options.
blank_lines_upper_bound = 3
format_code_in_doc_comments = true
format_macro_matchers = true
# When structs, slices, arrays, and block/array-like macros are used as the last argument in an expression list,
# allow them to overflow (like blocks/closures) instead of being indented on a new line.
overflow_delimited_expr = true
# Put `type` and `const` items before methods.
reorder_impl_items = true

View file

@ -16,7 +16,21 @@
fenixLib ? let
src = fetchTarball "https://github.com/nix-community/fenix/archive/main.tar.gz";
in import src { inherit pkgs; },
fenixToolchain ? fenixLib.latest.toolchain,
fenixBaseToolchain ? fenixLib.stable.withComponents [
"cargo"
"rustc"
"llvm-tools"
"rust-std"
"rust-docs"
"rust-src"
"rustc-dev"
"clippy"
],
fenixToolchain ? fenixLib.combine [
fenixBaseToolchain
# Rustfmt is very handy to have as nightly.
fenixLib.latest.rustfmt
],
}: let
inherit (pkgs) lib;

View file

@ -1,6 +1,7 @@
use std::{
env, io,
os::fd::{AsFd, BorrowedFd, IntoRawFd, OwnedFd, RawFd},
process::{Command, Stdio},
sync::{
Arc, LazyLock,
atomic::{AtomicUsize, Ordering},
@ -12,7 +13,20 @@ use iddqd::{BiHashMap, IdOrdMap};
use mio::{Events, Interest, Poll, Token, event::Event, net::UnixListener, unix::SourceFd};
use rustix::{buffer::spare_capacity, net::SocketFlags, process::Uid};
use rustix::{
buffer::spare_capacity,
net::SocketFlags,
process::{Pid, PidfdFlags, Uid, WaitId, WaitIdOptions},
};
// Shadow the external `rustix` crate with a local module that re-exports all
// of it plus a few frequently-used `process` free functions, so call sites in
// this file can write e.g. `rustix::waitid(..)` / `rustix::pidfd_open(..)`
// without a `use` line per function. (Inside the module, the bare `rustix`
// path still resolves to the extern crate, not to this module itself.)
mod rustix {
    pub use rustix::process::{getuid, pidfd_open, waitid};
    pub use rustix::*;
}
//mod rustix_prelude {
//    pub use rustix::process::{getuid, pidfd_open, waitid};
//}
use serde_json::StreamDeserializer;
@ -21,10 +35,7 @@ use crate::prelude::*;
pub mod api;
use api::DaemonCmd;
use crate::{
SourceFile, SourceLine,
daemon_tokfd::{FdInfo, FdKind},
};
use crate::daemon_tokfd::{FdInfo, FdKind};
use crate::{OwnedFdWithFlags, TokenFd};
@ -45,6 +56,22 @@ pub static TMPDIR: LazyLock<&'static Path> = LazyLock::new(|| {
Box::leak(dir)
});
/// Path to the `nixos-rebuild` executable, resolved from `PATH` once on first
/// use. On lookup failure, logs the error and falls back to the conventional
/// NixOS system-profile location.
pub static NIXOS_REBUILD: LazyLock<&'static Path> = LazyLock::new(|| {
    which::which("nixos-rebuild")
        .inspect_err(|e| error!("couldn't find `nixos-rebuild` in PATH: {e}"))
        // Leak the resolved PathBuf to get the `&'static Path` this static
        // promises; this happens at most once per process.
        .map(PathBuf::into_boxed_path)
        .map(|boxed| &*Box::leak(boxed))
        .unwrap_or(Path::new("/run/current-system/sw/bin/nixos-rebuild"))
});

/// Path to the `nix` executable, resolved from `PATH` once on first use, with
/// the same logging and system-profile fallback scheme as [`NIXOS_REBUILD`].
pub static NIX: LazyLock<&'static Path> = LazyLock::new(|| {
    which::which("nix")
        .inspect_err(|e| error!("couldn't find `nix` in PATH: {e}"))
        .map(PathBuf::into_boxed_path)
        .map(|boxed| &*Box::leak(boxed))
        .unwrap_or(Path::new("/run/current-system/sw/bin/nix"))
});
const TIMEOUT_NEVER: Option<Duration> = None;
static NEXT_TOKEN_NUMBER: AtomicUsize = AtomicUsize::new(1);
@ -60,6 +87,42 @@ fn next_token() -> Token {
Token(tok)
}
trait EventExt {
type Display;
fn display(&self) -> Self::Display;
}
#[derive(Copy)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct EventDisplay {
token: Token,
error: bool,
writable: bool,
write_closed: bool,
readable: bool,
read_closed: bool,
}
impl EventExt for Event {
type Display = EventDisplay;
fn display(&self) -> Self::Display {
EventDisplay {
token: self.token(),
error: self.is_error(),
writable: self.is_writable(),
write_closed: self.is_write_closed(),
readable: self.is_readable(),
read_closed: self.is_read_closed(),
}
}
}
impl Display for EventDisplay {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
todo!()
}
}
#[derive(Debug)]
pub struct Daemon {
config_path: Arc<Path>,
@ -132,6 +195,32 @@ impl Daemon {
token
}
/// Registers `fd` with the poller under a freshly allocated token, recording a
/// human-readable `name` alongside its [`FdKind`] for diagnostics.
///
/// Panics (or hits a `todo!`/`unreachable!`) if `fd` is already tracked in
/// either map or if registering with the poller fails.
///
/// The original declaration carried an unused generic type parameter `S`;
/// it has been removed since `name` is taken concretely as `Box<OsStr>`.
fn register_with_name(&mut self, fd: RawFd, kind: FdKind, name: Box<OsStr>) -> Token {
    let token = next_token();
    debug!(
        "Registering new {} FdInfo for {fd} ({}) with token {token:?}",
        name.to_string_lossy(),
        kind.name_str(),
    );
    self.fd_info
        .insert_unique(FdInfo::new_with_name(fd, kind, name))
        .unwrap();
    self.tokfd
        .insert_unique(TokenFd { token, fd })
        .unwrap_or_else(|e| todo!("{e}"));
    // mio's SourceFd wraps an already-open raw fd for registration; interest
    // is read-only since we only ever consume from tracked fds.
    let mut source = SourceFd(&fd);
    self.poller
        .registry()
        .register(&mut source, token, Interest::READABLE)
        .unwrap_or_else(|e| unreachable!("registering {fd:?} with poller failed: {e}"));
    token
}
fn deregister(&mut self, fd: RawFd) {
let info = self
.fd_info
@ -279,6 +368,32 @@ const DAEMON: Token = Token(0);
/// Private helpers.
impl Daemon {
/// Drains readable data from a child's stdout/stderr `fd` and re-emits each
/// line through our own logging, labeled with which stream it came from.
///
/// `fd` must already be registered in `fd_info` with kind `ChildStdout` or
/// `ChildStderr`; any other kind is a logic error and panics.
fn proxy_stdio(&mut self, fd: &BorrowedFd) -> Result<(), IoError> {
    let info = self.fd_info.get(&fd.as_raw_fd()).unwrap();
    let label = match info.kind {
        FdKind::ChildStdout => "stdout",
        FdKind::ChildStderr => "stderr",
        other => unreachable!("child stdio cannot have kind {other:?}"),
    };
    // FIXME: don't use a new allocation every time.
    let mut buffer: Vec<u8> = Vec::with_capacity(1024);
    // FIXME: handle line buffering correctly (a read may end mid-line, and we
    // log whatever partial line we have).
    loop {
        let count = rustix::io::read(fd, spare_capacity(&mut buffer))
            .inspect_err(|e| error!("read() on child stdio fd {fd:?} failed: {e}"))?;
        if count == 0 {
            break;
        }
        for line in buffer.lines() {
            info!("[child {label}]: {}", line.as_bstr())
        }
        // Discard what we just logged. Without this, every iteration re-logs
        // all previously read lines, and once the buffer's capacity fills,
        // `spare_capacity` becomes empty so `read` returns 0 and the loop
        // exits early even though the child may still have output pending.
        buffer.clear();
    }
    Ok(())
}
fn read_cmd(&mut self, fd: &BorrowedFd) -> Result<(), IoError> {
// FIXME: don't use a new allocation every time.
let mut cmd_buffer: Vec<u8> = Vec::with_capacity(1024);
@ -317,42 +432,82 @@ impl Daemon {
}
fn dispatch_cmd(&mut self, cmd: DaemonCmd) -> Result<(), IoError> {
// Write the new file...
let (name, value) = match cmd {
DaemonCmd::Append { name, value } => (name, value),
};
let mut opts = File::options();
opts.read(true)
.write(true)
.create(false)
.custom_flags(libc::O_CLOEXEC);
let source_file = SourceFile::open_from(self.config_path.clone(), opts)?;
let source_file = crate::open_source_file(self.config_path.clone())?;
let pri = crate::get_where(source_file.clone()).unwrap_or_else(|e| todo!("{e}"));
let new_pri = pri - 1;
//let new_pri_line =
// crate::get_next_prio_line(source_file.clone(), Arc::from(name), Arc::from(value));
// Get next priority line.
let source_lines = source_file.lines()?;
let penultimate = source_lines.get(source_lines.len() - 2);
// FIXME: don't rely on whitespace lol
debug_assert_eq!(penultimate.map(SourceLine::text).as_deref(), Some(" ];"));
let penultimate = penultimate.unwrap();
let new_generation = 0 - new_pri;
let new_line = SourceLine {
line: penultimate.line,
path: source_file.path(),
text: Arc::from(format!(
" {} = lib.mkOverride ({}) ({}); # DYNIX GENERATION {}",
name.to_nix_decl(),
new_pri,
value,
new_generation,
)),
};
drop(source_lines);
let opt_name = name.to_nix_decl();
let new_line = crate::get_next_prio_line(
source_file.clone(),
&opt_name,
new_pri,
&value.to_nix_source(),
)
.unwrap_or_else(|e| panic!("someone is holding a reference to source.lines(): {e}"));
crate::write_next_prio(source_file, new_line).unwrap_or_else(|e| todo!("{e}"));
// Rebuild and switch.
// FIXME: allow passing additional args.
//let child = Command::new(*NIXOS_REBUILD)
// .arg("switch")
// .arg("--log-format")
// .arg("raw-with-logs")
// .arg("--no-reexec")
// .arg("-v")
// .stdout(Stdio::piped())
// .stderr(Stdio::piped())
// .spawn()
// .inspect_err(|e| {
// error!("failed to spawn `nixos-rebuild` command: {e}");
// })?;
let expr = "(import <nixpkgs/nixos> { }).config.dynamicism.applyDynamicConfiguration { }";
let child = Command::new(*NIX)
.arg("run")
.arg("--show-trace")
.arg("--log-format")
.arg("raw-with-logs")
.arg("--impure")
.arg("-E")
.arg(expr)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.inspect_err(|e| error!("failed to spawn `nix run` command: {e}"))?;
debug!("Spanwed child process {}", child.id());
let pid = Pid::from_child(&child);
let stdout = child.stdout.unwrap_or_else(|| {
unreachable!("`child` is given `.stdout(Stdio::piped())`");
});
let stderr = child.stderr.unwrap_or_else(|| {
unreachable!("`child` is given `.stderr(Stdio::piped())`");
});
let _token = self.register(stdout.into_raw_fd(), FdKind::ChildStdout);
let _token = self.register(stderr.into_raw_fd(), FdKind::ChildStderr);
match rustix::process::pidfd_open(pid, PidfdFlags::NONBLOCK) {
Ok(pidfd) => {
debug!("Opened pidfd {pidfd:?}, for process {pid}");
self.register(pidfd.into_raw_fd(), FdKind::Pid(pid));
},
Err(e) if e.kind() == IoErrorKind::NotFound => {
warn!("child {pid} not found; died before we could open it?");
},
Err(e) => {
error!("Error opening pidfd for child {pid}: {e}");
return Err(e)?;
},
}
Ok(())
}
@ -386,9 +541,9 @@ impl Daemon {
loop {
if tracing::enabled!(tracing::Level::DEBUG) {
trace!("Daemon loop iteration, with file descriptors: ");
debug!("Daemon loop iteration, with file descriptors: ");
for info in &self.fd_info {
trace!("- {}", info.display());
debug!("- {}", info.display());
}
}
@ -441,7 +596,7 @@ impl Daemon {
}
fn handle_event(&mut self, event: &Event) -> Result<(), IoError> {
trace!("Handling event {event:?}");
trace!("Handling event {event:#?}");
match event.token() {
DAEMON => {
@ -487,16 +642,92 @@ impl Daemon {
},
other_token => {
// This must be a stream fd.
let stream_fd = self.fd_for_token(other_token).unwrap_or_else(|| {
let fd = self.fd_for_token(other_token).unwrap_or_else(|| {
unreachable!("tried to get fd for non-existent token? {other_token:?}")
});
let Some(info) = self.fd_info.get(&fd) else {
panic!("Received an event on an unregistered fd {fd}; IO-safety violation?");
};
let either_available = event.is_readable() || event.is_writable();
if !either_available {
info!(
"File descriptor {} r:{}, w:{}",
info.display(),
event.is_readable(),
event.is_writable(),
);
// FIXME: code duplication
if event.is_read_closed() {
self.deregister(fd);
return Ok(());
}
}
match info.kind {
FdKind::Pid(pid) => {
debug!("Reaping child process {pid}");
// SAFETY: `fd` cannot have been closed yet, since that's what we do here.
let pidfd = unsafe { BorrowedFd::borrow_raw(fd) };
let status = rustix::waitid(WaitId::PidFd(pidfd), WaitIdOptions::EXITED)
.unwrap_or_else(|e| {
todo!("waitid() can fail? on pid {pid}: {e}");
})
.unwrap_or_else(|| {
todo!("waitid() returned None? for pid {pid}");
});
debug!("waitid() for pid {pid} returned status: {status:?}");
let is_dead = status.exited() || status.killed() || status.dumped();
if !is_dead {
todo!("Handle process {pid} events that aren't death: {status:?}");
}
let Some(exit_code) = status.exit_status() else {
unreachable!("Process {pid} died with no exit code at all? {status:?}");
};
debug!("Child process {pid} exited with code {exit_code}");
// Close the pidfd.
self.deregister(fd);
let stream = self
.fd_info
.iter()
.find_map(|info| (info.kind == FdKind::SockStream).then_some(info));
if let Some(stream) = stream {
// SAFETY: fixme.
let stream_fd = unsafe { BorrowedFd::borrow_raw(stream.fd) };
let payload = format!("{{ \"status\": {exit_code} }}\n");
if let Err(e) = rustix::io::write(stream_fd, payload.as_bytes()) {
error!("couldn't write reply to stream fd {stream_fd:?}: {e}");
}
}
},
FdKind::ChildStdout => {
warn!("got stdout");
// SAFETY: oh boy.
let stdout = unsafe { BorrowedFd::borrow_raw(fd) };
self.proxy_stdio(&stdout)
.unwrap_or_else(|e| error!("failed to proxy child stdout: {e}"));
},
FdKind::ChildStderr => {
warn!("got stderr");
// SAFETY: oh boy.
let stderr = unsafe { BorrowedFd::borrow_raw(fd) };
self.proxy_stdio(&stderr)
.unwrap_or_else(|e| error!("failed to proxy child stderr: {e}"));
},
FdKind::SockStream => {
// SAFETY: oh boy.
let stream_fd = unsafe { BorrowedFd::borrow_raw(fd) };
self.read_cmd(&stream_fd).unwrap();
},
kind => todo!("{kind:?}"),
};
if event.is_read_closed() {
self.deregister(stream_fd);
} else {
// SAFETY: oh boy.
let stream_fd = unsafe { BorrowedFd::borrow_raw(stream_fd) };
self.read_cmd(&stream_fd).unwrap();
self.deregister(fd);
return Ok(());
}
},
}

View file

@ -47,6 +47,24 @@ impl ConvenientAttrPath {
}
}
/// A literal Nix value received over the daemon API.
#[derive(Debug, Clone, PartialEq)]
#[derive(Deserialize, Serialize)]
#[serde(untagged)]
pub enum NixLiteral {
    String(String),
    Number(f64),
    // FIXME: add the rest =P
}

impl NixLiteral {
    /// Renders this literal as Nix source text.
    ///
    /// Strings are escaped for Nix double-quoted string syntax: backslash,
    /// the double quote, and `${` (interpolation) are backslash-escaped, so a
    /// value containing any of them round-trips instead of corrupting — or
    /// injecting code into — the generated Nix source. (Literal newlines and
    /// tabs are legal inside Nix `"…"` strings and need no escaping.)
    pub fn to_nix_source(&self) -> String {
        match self {
            NixLiteral::String(s) => {
                let escaped = s
                    .replace('\\', "\\\\")
                    .replace('"', "\\\"")
                    .replace("${", "\\${");
                format!("\"{escaped}\"")
            },
            // NOTE(review): `f64::to_string` yields `NaN`/`inf` for
            // non-finite values, which is not valid Nix — assuming those
            // never arrive over the API; TODO confirm upstream validation.
            NixLiteral::Number(n) => n.to_string(),
        }
    }
}
#[derive(Debug, Clone, PartialEq)]
#[derive(serde::Deserialize, serde::Serialize)]
#[serde(tag = "action", content = "args", rename_all = "snake_case")]
@ -54,6 +72,6 @@ impl ConvenientAttrPath {
pub enum DaemonCmd {
Append {
name: ConvenientAttrPath,
value: Box<str>,
value: Box<NixLiteral>,
},
}

View file

@ -3,6 +3,7 @@ use std::{os::fd::RawFd, sync::OnceLock};
use circular_buffer::CircularBuffer;
use iddqd::{BiHashItem, IdOrdItem};
use mio::Token;
use rustix::process::Pid;
use crate::prelude::*;
@ -76,11 +77,11 @@ impl FdInfo {
impl IdOrdItem for FdInfo {
type Key<'a> = &'a RawFd;
iddqd::id_upcast!();
fn key(&self) -> &RawFd {
&self.fd
}
iddqd::id_upcast!();
}
#[derive(Debug)]
@ -105,13 +106,16 @@ impl<'a> Display for FdInfoDisplay<'a> {
}
#[derive(Copy)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)]
#[non_exhaustive]
pub enum FdKind {
File,
Socket,
SockStream,
Poller,
ChildStdout,
ChildStderr,
Pid(Pid),
#[default]
Unknown,
}
@ -124,6 +128,9 @@ impl FdKind {
Socket => "socket",
SockStream => "socket stream",
Poller => "poller",
ChildStdout => "child stdout",
ChildStderr => "child stderr",
Pid(_) => "pidfd",
Unknown => "«unknown»",
}
}
@ -140,6 +147,8 @@ impl BiHashItem for TokenFd {
type K1<'a> = Token;
type K2<'a> = RawFd;
iddqd::bi_upcast!();
fn key1(&self) -> Token {
self.token
}
@ -147,8 +156,6 @@ impl BiHashItem for TokenFd {
fn key2(&self) -> RawFd {
self.fd
}
iddqd::bi_upcast!();
}
impl From<TokenFd> for (Token, RawFd) {

View file

@ -99,6 +99,22 @@ static MK_OVERRIDE_RE: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?-u)\bmkOverride\s+\((?<priority>[\d-]+)\)").unwrap()
});
/// Opens the dynamic-config source file at `path` for read+write. The file
/// must already exist (`create(false)`), and `O_CLOEXEC` keeps the fd from
/// leaking into spawned child processes. Open failures are logged with the
/// offending path and then propagated.
pub(crate) fn open_source_file(path: Arc<Path>) -> Result<SourceFile, IoError> {
    let mut opts = File::options();
    opts.read(true)
        .write(true)
        .create(false)
        .custom_flags(libc::O_CLOEXEC);
    SourceFile::open_from(Arc::clone(&path), opts)
        .tap_err(|e| error!("couldn't open source file at {}: {e}", path.display()))
}

/// Unimplemented stub: compute the [`SourceLine`] to insert for the next
/// dynamic override. Calling this panics via `todo!`.
pub(crate) fn get_line_to_insert() -> SourceLine {
    //
    todo!();
}
#[tracing::instrument(level = "debug")]
pub fn do_append(args: Arc<Args>, append_args: AppendCmd) -> Result<(), BoxDynError> {
let filepath = Path::new(&args.file);
@ -110,22 +126,16 @@ pub fn do_append(args: Arc<Args>, append_args: AppendCmd) -> Result<(), BoxDynEr
filepath.to_path_buf()
};
let mut opts = File::options();
opts.read(true)
.write(true)
.create(false)
.custom_flags(libc::O_CLOEXEC);
let source_file = SourceFile::open_from(Arc::from(filepath), opts)?;
let source_file = open_source_file(Arc::from(filepath))?;
let pri = get_where(source_file.clone())?;
let new_pri = pri - 1;
let new_pri_line = get_next_prio_line(
source_file.clone(),
append_args.name,
&append_args.name,
new_pri,
append_args.value,
&append_args.value,
)?;
debug!("new_pri_line={new_pri_line}");
@ -210,9 +220,9 @@ pub fn get_where(dynamic_nix: SourceFile) -> Result<i64, BoxDynError> {
pub fn get_next_prio_line(
source: SourceFile,
option_name: Arc<str>,
option_name: &str,
new_prio: i64,
new_value: Arc<str>,
new_value: &str,
) -> Result<SourceLine, BoxDynError> {
let source_lines = source.lines()?;
let penultimate = source_lines.get(source_lines.len() - 2);
@ -246,6 +256,8 @@ pub fn write_next_prio(mut source: SourceFile, new_line: SourceLine) -> Result<(
text: Arc::from(" }"),
};
debug!("writing new source line: {new_line}");
source.insert_lines(&[new_mod_start, new_line, new_mod_end])?;
Ok(())

View file

@ -6,6 +6,7 @@ use std::{
cell::{Ref, RefCell},
hash::Hash,
io::{BufRead, BufReader, BufWriter},
mem::{self, MaybeUninit},
ops::Deref,
ptr,
sync::{Arc, Mutex, OnceLock},
@ -103,6 +104,28 @@ pub struct SourceFile {
lines: Arc<OnceLock<RefCell<Vec<SourceLine>>>>,
}
/// Nameable wrapper around `Ref<'s, [SourceLine]>`, used as the concrete
/// return type of `SourceFile::lines` in place of an opaque
/// `impl Deref<Target = [SourceLine]>`.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpaqueDerefSourceLines<'s>(Ref<'s, [SourceLine]>);

impl<'s> Deref for OpaqueDerefSourceLines<'s> {
    type Target = [SourceLine];

    fn deref(&self) -> &[SourceLine] {
        &*self.0
    }
}

/// Nameable wrapper around `Ref<'s, SourceLine>`, used as the concrete return
/// type of `SourceFile::line`; same purpose as [`OpaqueDerefSourceLines`].
#[derive(Debug)]
#[repr(transparent)]
pub struct OpaqueDerefSourceLine<'s>(Ref<'s, SourceLine>);

impl<'s> Deref for OpaqueDerefSourceLine<'s> {
    type Target = SourceLine;

    fn deref(&self) -> &SourceLine {
        &*self.0
    }
}
impl SourceFile {
/// Panics if `path` is a directory path instead of a file path.
pub fn open_from(path: Arc<Path>, options: OpenOptions) -> Result<Self, IoError> {
@ -159,15 +182,17 @@ impl SourceFile {
Ok(self._lines_slice())
}
pub fn lines(&self) -> Result<impl Deref<Target = [SourceLine]> + '_, IoError> {
self._lines()
pub fn lines(&self) -> Result<OpaqueDerefSourceLines<'_>, IoError> {
let lines = self._lines()?;
Ok(OpaqueDerefSourceLines(lines))
}
pub fn line(&self, line: Line) -> Result<impl Deref<Target = SourceLine> + '_, IoError> {
pub fn line(&self, line: Line) -> Result<OpaqueDerefSourceLine<'_>, IoError> {
let lines_lock = self._lines()?;
let line = Ref::map(lines_lock, |lines| &lines[line.index() as usize]);
Ok(line)
Ok(OpaqueDerefSourceLine(line))
}
/// `lines` but already be initialized.

View file

@ -44,11 +44,29 @@ in
};
};
# Setup XDG base directories for me.
security.pam.services.login = {
rules.session.xdg = {
enable = true;
control = "optional";
modulePath = "${pkgs.pam_xdg}/lib/security/pam_xdg.so";
args = [ ];
order = 10500;
};
};
environment.pathsToLink = [ "/share" ];
environment.extraOutputsToInstall = [ "modules" ];
environment.variables = {
"NIXOS_CONFIG" = "/etc/nixos/configuration.nix";
};
environment.sessionVariables = {
"NIXOS_CONFIG" = "/etc/nixos/configuration.nix";
};
#systemd.services.dynix-daemon = {
#
#};
environment.shellAliases = {
ls = "eza --long --header --group --group-directories-first --classify --binary";
@ -58,5 +76,7 @@ in
eza
fd
ripgrep
netcat.nc
socat
];
}

View file

@ -4,6 +4,7 @@
import functools
from pathlib import Path
import json
import shlex
import textwrap
import tomllib
@ -55,31 +56,76 @@ def get_config_file() -> dict[str, Any]:
with open(config_file_path, "rb") as f:
config_data = tomllib.load(f)
config_file_path.unlink()
try:
config_file_path.unlink()
except Exception as e:
machine.log(f"Couldn't unlike path {config_file_path}: {e}")
raise
return config_data
@beartype
def dynix_append(option: str, value: Any):
def dynix_append_daemon(option: str, value: Any):
#machine.succeed(f'''
# dynix append {shlex.quote(option)} {shlex.quote(str(value))}
#'''.strip())
payload = json.dumps(dict(
action="append",
args=dict(
name=option,
value=value,
),
))
machine.succeed(f'''
echo '{payload}' | socat -T10 -,ignoreeof /run/user/0/dynix.sock
''')
@beartype
def dynix_append_traditional(option: str, value: Any):
machine.succeed(f'''
dynix append {shlex.quote(option)} {shlex.quote(str(value))}
'''.strip())
@beartype
def do_apply():
expr = textwrap.dedent("""
(import <nixpkgs/nixos> { }).config.dynamicism.applyDynamicConfiguration { }
""").strip()
machine.succeed(rf"""
nix run --show-trace --log-format raw-with-logs --impure -E {shlex.quote(expr)}
""".strip())
@beartype
def dynix_append(option: str, value: Any):
use_daemon = True
#use_daemon = False
if use_daemon:
dynix_append_daemon(option, value)
else:
dynix_append_traditional(option, value)
machine.log("Doing test initialization and checks")
machine.wait_for_unit("default.target")
machine.wait_for_unit("install-dynix.service")
dynix_out = machine.succeed("dynix --version")
assert "dynix" in dynix_out, f"dynix not in {dynix_out=}"
machine.succeed("systemctl start user@0.service")
machine.wait_for_unit("user@0.service")
machine.succeed(textwrap.dedent(r'''
systemd-run --collect --unit=dynix-daemon.service \
-E "RUST_LOG=trace" \
-E "PATH=$PATH" \
-E "NIX_PATH=$NIX_PATH" \
-E "NIXOS_CONFIG=$NIXOS_CONFIG" \
-p "SuccessExitStatus=0 2" \
dynix daemon --color=always
'''))
machine.wait_for_unit("dynix-daemon.service")
machine.log("Checking initial harmonia.service conditions")
# Config should have our initial values.
config_toml = get_config_file()
assert int(config_toml['workers']) == 4, f"{config_toml['workers']=} != 4"
@ -93,24 +139,28 @@ config_toml = get_config_file()
assert int(config_toml['workers']) == 4, f"{config_toml['workers']=} != 4"
assert int(config_toml['max_connection_rate']) == 256, f"{config_toml['max_connection_rate']=} != 256"
machine.log("Testing dynamic workers=20")
new_workers = 20
dynix_append("services.harmonia.settings.workers", new_workers)
do_apply()
machine.log("Testing that workers, but not max_connectin_rate, changed")
# Workers, but not max connection rate, should have changed.
config_toml = get_config_file()
assert int(config_toml['workers']) == new_workers, f"{config_toml['workers']=} != {new_workers}"
assert int(config_toml['max_connection_rate']) == 256, f"{config_toml['max_connection_rate']=} != 256"
machine.log("Testing dynamic max_connection_rate=100")
new_max_connection_rate = 100
dynix_append("services.harmonia.settings.max_connection_rate", new_max_connection_rate)
do_apply()
# Max connection rate should have changed, and workers should be the same as before.
config_toml = get_config_file()
assert int(config_toml['max_connection_rate']) == new_max_connection_rate, f"{config_toml['max_connection_rate']=} != {new_max_connection_rate}"
assert int(config_toml['workers']) == new_workers, f"{config_toml['workers']=} != {new_workers}"
machine.log("Done with tests; stopping dynix-daemon")
machine.succeed("systemctl stop dynix-daemon.service")
# And this should set everything back.
machine.succeed("env PAGER= nixos-rebuild switch --log-format raw-with-logs --no-reexec -v --fallback")
machine.wait_for_unit("harmonia.service")