# NixOS module: btrfs-replication standby for the NFS services host.
# Accepts restricted-SSH `btrfs receive` pushes into /persist/services-standby
# and prunes received snapshots so only a short HA-failover window is kept.
{ config, lib, pkgs, ... }:

let
  cfg = config.nfsServicesStandby;
in
{
  options.nfsServicesStandby = {
    enable = lib.mkEnableOption "NFS services standby" // { default = true; };

    replicationKeys = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = [ ];
      description = ''
        SSH public keys authorized to replicate btrfs snapshots to this standby.
        These keys are restricted to only run 'btrfs receive /persist/services-standby'.
        Get the public key from the NFS server:
          ssh <nfs-server> sudo cat /persist/root/.ssh/btrfs-replication.pub
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    # Allow root SSH login for replication (restricted by command= in authorized_keys)
    # This is configured in common/sshd.nix

    # Restricted SSH keys for btrfs replication: `command=` pins each key to the
    # single receive command, and `restrict` disables port/agent/X11 forwarding
    # and PTY allocation, so a leaked key cannot do anything else as root.
    users.users.root.openssh.authorizedKeys.keys =
      map
        (key: ''command="btrfs receive /persist/services-standby",restrict ${key}'')
        cfg.replicationKeys;

    # Mount point for services-standby subvolume.
    # This is just declarative documentation - the subvolume must be created manually once:
    #   sudo btrfs subvolume create /persist/services-standby
    # After that, it will persist across reboots (it's under /persist).
    # The self-referential bind mount is a no-op at runtime but records the
    # path in the declarative filesystem list; noCheck skips fsck for it.
    fileSystems."/persist/services-standby" = {
      device = "/persist/services-standby";
      fsType = "none";
      options = [ "bind" ];
      noCheck = true;
    };

    # Cleanup old snapshots on standby (keep last 4 hours for HA failover).
    systemd.services.cleanup-services-standby-snapshots = {
      description = "Cleanup old btrfs snapshots in services-standby";
      path = [ pkgs.btrfs-progs pkgs.findutils ];
      script = ''
        set -euo pipefail
        # Keep last 4 hours of snapshots (48 snapshots at 5min intervals).
        # '|| true' is deliberate best-effort: a busy or already-deleted
        # snapshot must not fail the whole unit.
        find /persist/services-standby -maxdepth 1 -name 'services@*' -mmin +240 -exec btrfs subvolume delete {} \; || true
      '';
      serviceConfig = {
        Type = "oneshot";
        User = "root";
      };
    };

    systemd.timers.cleanup-services-standby-snapshots = {
      description = "Timer for cleaning up old snapshots on standby";
      wantedBy = [ "timers.target" ];
      timerConfig = {
        # Run hourly so disk usage stays close to the stated 4-hour retention
        # window. (The previous "daily" schedule let ~24h of 5-minute
        # snapshots — ~288 of them — pile up before the first prune, which
        # contradicted the retention policy in the cleanup script.)
        OnCalendar = "hourly";
        # Catch up on missed runs after downtime.
        Persistent = true;
      };
    };
  };
}