NFS server and client setup.

2025-10-22 13:06:21 +01:00
parent 1262e03e21
commit 967ff34a51
9 changed files with 739 additions and 2 deletions

cluster-node.nix

@@ -1,13 +1,14 @@
 { pkgs, ... }:
 {
   # Cluster node configuration
-  # Extends minimal-node with cluster-specific services (Consul, GlusterFS, CIFS)
+  # Extends minimal-node with cluster-specific services (Consul, GlusterFS, CIFS, NFS)
   # Used by: compute nodes (c1, c2, c3)
   imports = [
     ./minimal-node.nix
     ./unattended-encryption.nix
     ./cifs-client.nix
     ./consul.nix
-    ./glusterfs-client.nix
+    ./glusterfs-client.nix # Keep during migration, will be removed in Phase 3
+    ./nfs-services-client.nix # New: NFS client for /data/services
   ];
 }

nfs-services-client.nix

@@ -0,0 +1,21 @@
{ pkgs, ... }:
{
  # NFS client for /data/services
  # Mounts from data-services.service.consul (Consul DNS for automatic failover)
  # The NFS server registers itself in Consul, so this will automatically
  # point to whichever host is currently running the NFS server
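  # Quick sanity check once deployed (assumes the system resolver forwards
  # *.consul queries to Consul's DNS on port 8600, as the mount below needs):
  #   dig +short @localhost -p 8600 data-services.service.consul
  #   ls /data/services   # first access triggers the automount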
fileSystems."/data/services" = {
device = "data-services.service.consul:/persist/services";
fsType = "nfs";
options = [
"x-systemd.automount" # Auto-mount on access
"noauto" # Don't mount at boot (automount handles it)
"x-systemd.idle-timeout=60" # Unmount after 60s of inactivity
"_netdev" # Network filesystem (wait for network)
];
};
# Ensure NFS client packages are available
environment.systemPackages = [ pkgs.nfs-utils ];
}

nfs-services-server.nix

@@ -0,0 +1,171 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.nfsServicesServer;
in
{
  options.nfsServicesServer = {
    enable = lib.mkEnableOption "NFS services server" // { default = true; };

    standbys = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = [];
      description = ''
        List of standby hostnames to replicate to (e.g. ["c1"]).
        Requires one-time setup on the NFS server:
          sudo mkdir -p /persist/root/.ssh
          sudo ssh-keygen -t ed25519 -f /persist/root/.ssh/btrfs-replication -N "" -C "root@$(hostname)-replication"
        Then add the public key to each standby's nfsServicesStandby.replicationKeys option.
      '';
    };
  };
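
  # Typical wiring (hostnames are illustrative):
  #   on the primary:  nfsServicesServer.standbys = [ "c1" ];
  #   on each standby: import nfs-services-standby.nix and add the server's
  #                    public key to nfsServicesStandby.replicationKeys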
  config = lib.mkIf cfg.enable {
    # Persist root SSH directory for replication key
    environment.persistence."/persist" = {
      directories = [
        "/root/.ssh"
      ];
    };

    # Bind mount /persist/services to /data/services for local access
    # This makes the path consistent with NFS clients
    # Use mkForce to override the NFS client mount from cluster-node.nix
    fileSystems."/data/services" = lib.mkForce {
      device = "/persist/services";
      fsType = "none";
      options = [ "bind" ];
    };

    # Nomad node metadata: mark this as the primary storage node
    # Jobs can constrain to ${meta.storage_role} = "primary"
    services.nomad.settings.client.meta = {
      storage_role = "primary";
    };
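
    # Example Nomad job constraint using this metadata (standard HCL,
    # shown for illustration):
    #   constraint {
    #     attribute = "${meta.storage_role}"
    #     value     = "primary"
    #   }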
    # NFS server configuration
    services.nfs.server = {
      enable = true;
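      # Export options: rw (read-write), sync (acknowledge writes only after
      # they reach disk), no_subtree_check (disable subtree checking, the
      # generally recommended setting), no_root_squash (client root keeps
      # root access, so services can manage file ownership)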
      exports = ''
        /persist/services 192.168.1.0/24(rw,sync,no_subtree_check,no_root_squash)
      '';
    };
    # Consul service registration for NFS
    services.consul.extraConfig.services = [{
      name = "data-services";
      port = 2049;
      checks = [{
        tcp = "localhost:2049";
        interval = "30s";
      }];
    }];
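    # The registration above is what makes data-services.service.consul
    # resolve to the active server. Note the TCP check only proves port 2049
    # is open; it does not verify that the export is actually serving data.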
    # Firewall for NFS (2049 nfsd, 111 rpcbind/portmapper, 20048 mountd)
    networking.firewall.allowedTCPPorts = [ 2049 111 20048 ];
    networking.firewall.allowedUDPPorts = [ 2049 111 20048 ];
    # systemd services: NFS server split-brain check + replication services
    systemd.services = lib.mkMerge ([
      # Safety check: prevent split-brain by ensuring no other NFS server is active
      {
        nfs-server = {
          preStart = ''
            # Wait for Consul to be available
            for i in {1..30}; do
              if ${pkgs.netcat}/bin/nc -z localhost 8600; then
                break
              fi
              echo "Waiting for Consul DNS... ($i/30)"
              sleep 1
            done

            # Check if another NFS server is already registered in Consul
            CURRENT_SERVER=$(${pkgs.dnsutils}/bin/dig +short @localhost -p 8600 data-services.service.consul | head -1 || true)
            MY_IP=$(${pkgs.iproute2}/bin/ip -4 addr show | ${pkgs.gnugrep}/bin/grep -oP '(?<=inet\s)\d+(\.\d+){3}' | ${pkgs.gnugrep}/bin/grep -v '^127\.' | head -1)
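            # Note: this picks the first non-loopback IPv4 address, which
            # assumes a single-homed host; multi-homed hosts may need a more
            # specific selection.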
            if [ -n "$CURRENT_SERVER" ] && [ "$CURRENT_SERVER" != "$MY_IP" ]; then
              echo "ERROR: Another NFS server is already active at $CURRENT_SERVER"
              echo "This host ($MY_IP) is configured as NFS server but should be standby."
              echo "To fix:"
              echo "  1. If this is intentional (failback), first demote the other server"
              echo "  2. Update this host's config to use nfs-services-standby.nix instead"
              echo "  3. Sync data from active server before promoting this host"
              exit 1
            fi
            echo "NFS server startup check passed (no other active server found)"
          '';
        };
      }
    ] ++ (lib.forEach cfg.standbys (standby: {
      "replicate-services-to-${standby}" = {
        description = "Replicate /persist/services to ${standby}";
        path = [ pkgs.btrfs-progs pkgs.openssh pkgs.coreutils pkgs.findutils pkgs.gnugrep ];
        script = ''
          set -euo pipefail
          SSH_KEY="/persist/root/.ssh/btrfs-replication"
          if [ ! -f "$SSH_KEY" ]; then
            echo "ERROR: SSH key not found at $SSH_KEY"
            echo "Run: sudo ssh-keygen -t ed25519 -f $SSH_KEY -N \"\" -C \"root@$(hostname)-replication\""
            exit 1
          fi

          SNAPSHOT_NAME="services@$(date +%Y%m%d-%H%M%S)"
          SNAPSHOT_PATH="/persist/$SNAPSHOT_NAME"

          # Create readonly snapshot
          btrfs subvolume snapshot -r /persist/services "$SNAPSHOT_PATH"
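          # (btrfs send, used below, requires a read-only source snapshot)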
          # Find previous snapshot on sender
          PREV_LOCAL=$(ls -t /persist/services@* 2>/dev/null | grep -v "^$SNAPSHOT_PATH$" | head -1 || true)

          # Check what snapshots exist on the receiver
          REMOTE_SNAPSHOTS=$(ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
            "ls -t /persist/services-standby/services@* 2>/dev/null || true")

          # Decide: incremental or full send
          if [ -n "$PREV_LOCAL" ] && echo "$REMOTE_SNAPSHOTS" | grep -q "$(basename "$PREV_LOCAL")"; then
            # Receiver has the parent snapshot, do incremental
            echo "Incremental send from $(basename "$PREV_LOCAL") to ${standby}"
            btrfs send -p "$PREV_LOCAL" "$SNAPSHOT_PATH" | \
              ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
                "btrfs receive /persist/services-standby"
          else
            # Receiver doesn't have the parent (new standby or missing snapshot), do full send
            echo "Full send to ${standby} (new standby or parent snapshot not found on receiver)"
            btrfs send "$SNAPSHOT_PATH" | \
              ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
                "btrfs receive /persist/services-standby"
          fi

          # Cleanup: delete sender snapshots older than 24 hours
          # (at 5-minute intervals that keeps up to 288 snapshots)
          find /persist -maxdepth 1 -name 'services@*' -mmin +1440 -exec btrfs subvolume delete {} \;
        '';
        serviceConfig = {
          Type = "oneshot";
          User = "root";
        };
      };
    }))
    );
    systemd.timers = lib.mkMerge (
      lib.forEach cfg.standbys (standby: {
        "replicate-services-to-${standby}" = {
          description = "Timer for replicating /persist/services to ${standby}";
          wantedBy = [ "timers.target" ];
          timerConfig = {
            OnCalendar = "*:0/5"; # Every 5 minutes
            Persistent = true;
          };
        };
      })
    );
  };
}

nfs-services-standby.nix

@@ -0,0 +1,68 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.nfsServicesStandby;
in
{
  options.nfsServicesStandby = {
    enable = lib.mkEnableOption "NFS services standby" // { default = true; };

    replicationKeys = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = [];
      description = ''
        SSH public keys authorized to replicate btrfs snapshots to this standby.
        These keys are restricted to only run 'btrfs receive /persist/services-standby'.
        Get the public key from the NFS server:
          ssh <nfs-server> sudo cat /persist/root/.ssh/btrfs-replication.pub
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    # Root SSH login for replication is restricted by command= in
    # authorized_keys; PermitRootLogin itself is configured in common/sshd.nix

    # Restricted SSH keys for btrfs replication
    users.users.root.openssh.authorizedKeys.keys =
      map (key: ''command="btrfs receive /persist/services-standby",restrict ${key}'') cfg.replicationKeys;
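    # Note: the forced command runs through root's shell, so it relies on
    # btrfs being on root's PATH; on a NixOS host with btrfs filesystems
    # configured this should already hold, as btrfs-progs lands in the
    # system path.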
    # Mount point for the services-standby subvolume
    # This is declarative documentation only; the subvolume must be created
    # manually once:
    #   sudo btrfs subvolume create /persist/services-standby
    # After that it persists across reboots (it lives under /persist)
    fileSystems."/persist/services-standby" = {
      device = "/persist/services-standby";
      fsType = "none";
      options = [ "bind" ];
      noCheck = true;
    };
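
    # Manual failover sketch (illustrative; the snapshot name below is
    # hypothetical, use the newest one present):
    #   sudo btrfs subvolume snapshot \
    #     /persist/services-standby/services@20250101-000000 /persist/services
    #   # ...then switch this host to nfs-services-server.nix and rebuild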
    # Cleanup old snapshots on standby (keep last 48 hours for safety)
    systemd.services.cleanup-services-standby-snapshots = {
      description = "Cleanup old btrfs snapshots in services-standby";
      path = [ pkgs.btrfs-progs pkgs.findutils ];
      script = ''
        set -euo pipefail
        # Keep last 48 hours of snapshots (576 snapshots at 5min intervals)
        find /persist/services-standby -maxdepth 1 -name 'services@*' -mmin +2880 -exec btrfs subvolume delete {} \; || true
      '';
      serviceConfig = {
        Type = "oneshot";
        User = "root";
      };
    };

    systemd.timers.cleanup-services-standby-snapshots = {
      description = "Timer for cleaning up old snapshots on standby";
      wantedBy = [ "timers.target" ];
      timerConfig = {
        OnCalendar = "daily";
        Persistent = true;
      };
    };
  };
}

common/sshd.nix

@@ -5,6 +5,7 @@
   settings = {
     PasswordAuthentication = false;
     KbdInteractiveAuthentication = false;
+    PermitRootLogin = "prohibit-password"; # Allow root login with SSH keys only
   };
 };