diff --git a/.sops.yaml b/.sops.yaml index 1b1875c..4c1ac48 100644 --- a/.sops.yaml +++ b/.sops.yaml @@ -3,7 +3,7 @@ keys: - &server_zippy age1gtyw202hd07hddac9886as2cs8pm07e4exlnrgfm72lync75ng9qc5fjac - &server_chilly age16yqffw4yl5jqvsr7tyd883vn98zw0attuv9g5snc329juff6dy3qw2w5wp - &server_alo_cloud_1 age1w5w4wfvtul3sge9mt205zvrkjaeh3qs9gsxhmq7df2g4dztnvv6qylup8z - - &server_c1 age1e7ejamlagumpgjw56h82e9rsz2aplgzmll4np073a9lyvxw2gauqswpqwl + - &server_c1 age1wwufz86tm3auxn6pn27c47s8rvu7en58rk00nghtaxsdpw0gya6qj6qxdt - &server_c2 age1gekmz8kc8r2lc2x6d4u63s2lnpmres4hu9wulxh29ch74ud7wfksq56xam - &server_c3 age1zjgqu3zks5kvlw6hvy6ytyygq7n25lu0uj2435zlf30smpxuy4hshpmfer creation_rules: diff --git a/hosts/c1/default.nix b/hosts/c1/default.nix index 8865dcb..e964bac 100644 --- a/hosts/c1/default.nix +++ b/hosts/c1/default.nix @@ -8,11 +8,11 @@ ]; diskLayout = { - mainDiskDevice = "/dev/disk/by-id/nvme-SAMSUNG_MZVLW256HEHP-000H1_S340NX0K910298"; + mainDiskDevice = "/dev/disk/by-id/nvme-KINGSTON_SNV3S1000G_50026B7383365CD3"; #keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777640496-0:0"; keyDiskDevice = "/dev/sda"; }; networking.hostName = "c1"; - services.tailscaleAutoconnect.authkey = "tskey-auth-kmFvBT3CNTRL-wUbELKSd5yhuuTwTcgJZxhPUTxKgcYKF"; + services.tailscaleAutoconnect.authkey = "tskey-auth-k2nQ771YHM11CNTRL-YVpoumL2mgR6nLPG51vNhRpEKMDN7gLAi"; } diff --git a/setup-host.txt b/setup-host.txt index 1d7cc9a..31374e9 100644 --- a/setup-host.txt +++ b/setup-host.txt @@ -9,3 +9,4 @@ * set the actual device IDs in hosts/<host>/default.nix * on base host: nix run github:nix-community/nixos-anywhere -- --flake '.#<host>' nixos@<ip> * after confirmed working, update hosts/<host>/default.nix to set keyFile to /dev/sdX (otherwise when the USB drive fails it's harder to replace) +* if replacing failed host in place, update key in .sops.yaml with the output from "ssh-keyscan <host> | ssh-to-age" diff --git a/stateful-commands.txt b/stateful-commands.txt index f3aadbc..94e7375 100644 --- 
a/stateful-commands.txt +++ b/stateful-commands.txt @@ -21,3 +21,9 @@ adding a new gluster node to the compute volume, with c3 having failed: * c1: gluster peer detach c3 same to then later replace 192.168.1.2 with 192.168.1.73 + +replacing failed / reinstalled gluster volume (c1 in this case). all commands on c2: + * gluster volume remove-brick compute replica 2 c1:/persist/glusterfs/compute/brick1 force + * gluster peer detach c1 + * gluster peer probe 192.168.1.71 (not c1 because switching to IPs to avoid DNS/tailscale issues) + * gluster volume add-brick compute replica 3 192.168.1.71:/persist/glusterfs/compute/brick1