diff --git a/common/glusterfs-client.nix b/common/glusterfs-client.nix
index 05905c4..a4c084f 100644
--- a/common/glusterfs-client.nix
+++ b/common/glusterfs-client.nix
@@ -3,10 +3,10 @@
   environment.systemPackages = [ pkgs.glusterfs ];
 
   fileSystems."/data/compute" = {
-    device = "c1:/compute";
+    device = "192.168.1.71:/compute";
     fsType = "glusterfs";
     options = [
-      "backup-volfile-servers=c2:c3"
+      "backup-volfile-servers=192.168.1.72:192.168.1.2"
       "_netdev"
     ];
   };
diff --git a/stateful-commands.txt b/stateful-commands.txt
index b49a5ac..1cf5ae8 100644
--- a/stateful-commands.txt
+++ b/stateful-commands.txt
@@ -11,3 +11,10 @@
 mysql credentials
 postgres credentials
  * Put secrets/postgres_password into a Nomad var named secrets/postgresql.postgres_password
+
+adding a new gluster node to the compute volume, with c3 having failed:
+(instructions from https://icicimov.github.io/blog/high-availability/Replacing-GlusterFS-failed-node/)
+ * zippy: sudo mkdir /persist/glusterfs/compute -p
+ * c1: gluster peer probe 192.168.1.2 (by IP because zippy resolved to a tailscale address)
+ * c1: gluster volume replace-brick compute c3:/persist/glusterfs/compute/brick1 192.168.1.2:/persist/glusterfs/compute/brick1 commit force
+ * c1: gluster volume heal compute full
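
For context (not part of the change above, an untested sketch): the new fileSystems entry should boil down to roughly the following manual mount, where backup-volfile-servers only gives the FUSE client fallback servers for fetching the volfile if 192.168.1.71 is unreachable at mount time (_netdev is left to systemd ordering):

 # rough manual equivalent of the Nix fileSystems."/data/compute" entry
 mount -t glusterfs -o backup-volfile-servers=192.168.1.72:192.168.1.2 192.168.1.71:/compute /data/compute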
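
Not from the linked instructions, but a plausible way to sanity-check the replacement from c1 once the heal has been kicked off (standard gluster CLI; output details vary by version):
 * c1: gluster peer status            (zippy / 192.168.1.2 should show as connected)
 * c1: gluster volume status compute  (the new brick 192.168.1.2:/persist/glusterfs/compute/brick1 should be online)
 * c1: gluster volume heal compute info  (pending entries should drop to zero as the full heal completes)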