Replace c3 (failed) with zippy for glusterfs.
Use IPs for mounting glusterfs; this prevents the boot-time mount from failing when it is attempted before networking (and name resolution) comes up.
@@ -3,10 +3,10 @@
   environment.systemPackages = [ pkgs.glusterfs ];
 
   fileSystems."/data/compute" = {
-    device = "c1:/compute";
+    device = "192.168.1.71:/compute";
     fsType = "glusterfs";
     options = [
-      "backup-volfile-servers=c2:c3"
+      "backup-volfile-servers=192.168.1.72:192.168.1.2"
       "_netdev"
     ];
   };
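Not part of the diff, but a quick sanity check after a rebuild (a sketch; data-compute.mount is the unit name systemd derives from the /data/compute mountpoint, and NixOS generates that unit from the fileSystems entry above):

* any node: findmnt /data/compute    (the SOURCE column should now show 192.168.1.71:/compute)
* any node: systemctl status data-compute.mount

backup-volfile-servers only matters while fetching the volume file at mount time; once mounted, the client talks to the bricks listed in the volfile directly.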
@@ -11,3 +11,10 @@ mysql credentials
 
 postgres credentials
 * Put secrets/postgres_password into a Nomad var named secrets/postgresql.postgres_password
+
+adding a new gluster node to the compute volume, with c3 having failed:
+(instructions from https://icicimov.github.io/blog/high-availability/Replacing-GlusterFS-failed-node/)
+* zippy: sudo mkdir /persist/glusterfs/compute -p
+* c1: gluster peer probe 192.168.1.2 (by IP because zippy resolved to a tailscale address)
+* c1: gluster volume replace-brick compute c3:/persist/glusterfs/compute/brick1 192.168.1.2:/persist/glusterfs/compute/brick1 commit force
+* c1: gluster volume heal compute full
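Not from the original notes, but a few standard gluster CLI checks to confirm the replacement took (same hosts and volume name as above):

* c1: gluster peer status    (zippy should appear as 192.168.1.2 with State: Peer in Cluster)
* c1: gluster volume status compute    (the new brick on 192.168.1.2 should show Online: Y)
* c1: gluster volume heal compute info    (pending-heal entry counts should drop to zero as the heal progresses)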
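As an aside on the postgres credentials note above: a hedged one-liner for that step, assuming the secret sits in a file at secrets/postgres_password, the Nomad var path is secrets/postgresql with key postgres_password, and the command runs on a host with nomad CLI access (c1 is a guess):

* c1: nomad var put secrets/postgresql postgres_password="$(cat secrets/postgres_password)"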