Reinstall c1 after failed disk.

2025-02-02 12:43:54 +00:00
parent 3c3e96dc72
commit cb6b27f00c
4 changed files with 10 additions and 3 deletions

.sops.yaml

@@ -3,7 +3,7 @@ keys:
- &server_zippy age1gtyw202hd07hddac9886as2cs8pm07e4exlnrgfm72lync75ng9qc5fjac
- &server_chilly age16yqffw4yl5jqvsr7tyd883vn98zw0attuv9g5snc329juff6dy3qw2w5wp
- &server_alo_cloud_1 age1w5w4wfvtul3sge9mt205zvrkjaeh3qs9gsxhmq7df2g4dztnvv6qylup8z
-- &server_c1 age1e7ejamlagumpgjw56h82e9rsz2aplgzmll4np073a9lyvxw2gauqswpqwl
+- &server_c1 age1wwufz86tm3auxn6pn27c47s8rvu7en58rk00nghtaxsdpw0gya6qj6qxdt
- &server_c2 age1gekmz8kc8r2lc2x6d4u63s2lnpmres4hu9wulxh29ch74ud7wfksq56xam
- &server_c3 age1zjgqu3zks5kvlw6hvy6ytyygq7n25lu0uj2435zlf30smpxuy4hshpmfer
creation_rules:
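
(The creation_rules that bind these key anchors are cut off in this hunk. For orientation, a rule in a .sops.yaml of this shape typically looks like the sketch below; the path_regex and the exact set of servers listed are assumptions, not taken from this repo:

    creation_rules:
      - path_regex: secrets/[^/]+\.yaml$
        key_groups:
          - age:
              - *server_zippy
              - *server_c1

Swapping c1's key here only changes who future encryptions target; files that are already encrypted still need re-encrypting, as in the sops updatekeys sketch further down.)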

hosts/c1/default.nix

@@ -8,11 +8,11 @@
];
diskLayout = {
mainDiskDevice = "/dev/disk/by-id/nvme-SAMSUNG_MZVLW256HEHP-000H1_S340NX0K910298";
mainDiskDevice = "/dev/disk/by-id/nvme-KINGSTON_SNV3S1000G_50026B7383365CD3";
#keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777640496-0:0";
keyDiskDevice = "/dev/sda";
};
networking.hostName = "c1";
-services.tailscaleAutoconnect.authkey = "tskey-auth-kmFvBT3CNTRL-wUbELKSd5yhuuTwTcgJZxhPUTxKgcYKF";
+services.tailscaleAutoconnect.authkey = "tskey-auth-k2nQ771YHM11CNTRL-YVpoumL2mgR6nLPG51vNhRpEKMDN7gLAi";
}
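
(The new mainDiskDevice value has to be read off the replacement hardware; a quick way to find it from a shell on the target — the grep pattern and example output are illustrative, not from this commit:

* ls -l /dev/disk/by-id/ | grep -i nvme
* pick the nvme-... symlink that resolves to the new disk (e.g. ../../nvme0n1) and use that full /dev/disk/by-id/ path as mainDiskDevice

The tailscale authkey changes too, presumably a freshly minted key for the reinstalled node, since the old node identity died with the disk.)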


@@ -9,3 +9,4 @@
* set the actual device IDs in hosts/<target>/default.nix
* on base host: nix run github:nix-community/nixos-anywhere -- --flake '.#<target>' nixos@<target IP>
* after it's confirmed working, update hosts/<target>/default.nix to set keyFile to /dev/sdX (otherwise, when the USB drive fails, it's harder to replace)
+* if replacing a failed host in place, update its key in .sops.yaml with the output of "ssh-keyscan <host> | ssh-to-age" (see the sketch below)

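(A minimal sketch of the key-rotation step from the last bullet, assuming sops-managed secrets files; the secrets/example.yaml path is an assumption:

* ssh-keyscan c1 | ssh-to-age (prints the reinstalled host's new age recipient)
* paste that onto the &server_c1 line in .sops.yaml, as in the first hunk above
* sops updatekeys secrets/example.yaml (re-encrypts the file for the updated recipient set; repeat for each secrets file))
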

@@ -21,3 +21,9 @@ adding a new gluster node to the compute volume, with c3 having failed:
* c1: gluster peer detach c3
the same steps were later used to replace 192.168.1.2 with 192.168.1.73
+replacing the brick of a failed / reinstalled host (c1 in this case); all commands on c2:
+* gluster volume remove-brick compute replica 2 c1:/persist/glusterfs/compute/brick1 force
+* gluster peer detach c1
+* gluster peer probe 192.168.1.71 (not c1: switching to IPs to avoid DNS/tailscale issues)
+* gluster volume add-brick compute replica 3 192.168.1.71:/persist/glusterfs/compute/brick1
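
(The new brick starts out empty after add-brick, so it's worth confirming the peer is healthy and letting self-heal copy the volume's data back; a follow-up sketch using the same volume name:

* gluster peer status (the re-probed node should show Connected)
* gluster volume heal compute full (kicks off a full self-heal onto the empty brick)
* gluster volume heal compute info (pending entries should drain to zero))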