Compare commits


51 Commits

SHA1 Message Date
82ec765245 HM configs to nix-dev. 2024-09-11 17:00:38 +01:00
96ad1f6158 Update HM config. 2024-09-11 16:55:45 +01:00
689217be78 Install file. 2024-09-11 16:34:38 +01:00
1ea0447d24 Additional ssh keys. 2024-09-11 16:34:19 +01:00
871162724a Switch to neovim. 2024-09-11 15:53:54 +01:00
10ebfee6fd Set up restarting health checks. 2024-09-10 20:08:57 +01:00
0e26c10a02 Use zsh. 2024-09-10 18:34:30 +01:00
148e2d6b44 WIP: random tweaks 2024-09-10 18:34:11 +01:00
b12fc1ad86 Upgrade to 2024.8 2024-09-09 10:23:01 +01:00
3199dfd987 Upgrade to 2024.6 2024-09-09 10:20:55 +01:00
cae5983876 Upgrade to 2024.4 2024-09-09 10:18:48 +01:00
d9d92b0de2 Upgrade to 2024.2 2024-09-09 10:15:32 +01:00
d484215476 WIP: qgis & lizmap config 2024-09-02 14:35:47 +01:00
5829c0f8b1 Update deps. 2024-08-31 08:40:02 +01:00
900c360fad Use nixpkgs cache for activation scripts. 2024-08-31 08:35:16 +01:00
7f88b6b731 Show package diffs at activation. 2024-08-31 07:45:03 +01:00
0986574628 More RAM for mysqld 2024-08-22 10:51:37 +01:00
5bce0ea072 Replace zippy with c3 for glusterfs. 2024-08-22 06:00:25 +01:00
27b4227993 Upgrade to mysql 9.0. 2024-08-06 19:50:53 +01:00
fbaeaee37c More RAM. 2024-08-06 19:43:28 +01:00
ef172ed583 Syncthing config for new c3. 2024-08-06 07:57:53 +01:00
3bb884bb91 Set direct key device name. 2024-08-06 07:52:29 +01:00
60bc1af8c4 Re-setup c3. 2024-08-06 07:38:58 +01:00
968289f2be Fix docs. 2024-08-06 07:38:37 +01:00
bcd1a5c0d5 Make the disko config a module. 2024-08-06 07:36:56 +01:00
258b8e1fdf Workaround for chilly eth interface name being different. 2024-08-05 07:47:20 +01:00
f565b56a5e Don't run nomad on chilly. 2024-08-05 06:29:40 +01:00
978121c75e Instructions for setting up a new host. 2024-08-05 06:27:02 +01:00
fb9da924d2 Reference key disk by device name instead of ID. 2024-08-05 06:26:44 +01:00
c4d688a739 Create new root subvolume on boot instead of relying on a blank snapshot.
Also persist /var/lib/nixos by default.
2024-08-05 06:26:10 +01:00
336873063e Deploy config for chilly. 2024-08-05 06:25:51 +01:00
c861b504c5 Update deps. 2024-08-05 06:25:41 +01:00
07c2e950c0 Persist /var/lib/nixos to keep UIDs stable. 2024-08-05 06:15:18 +01:00
b10092092f Initial config for chilly. 2024-08-04 15:55:51 +01:00
4803e983c1 Basic home manager wiring. 2024-07-30 09:59:50 +01:00
ef729f63a9 Add missing step for peer removal. 2024-07-30 08:56:35 +01:00
b84a90d35a Replace c3 (failed) with zippy for glusterfs.
Use IPs for mounting glusterfs; this prevents the boot mount from failing before networking comes up.
2024-07-26 08:20:17 +01:00
f2ef036bf4 Update node path. 2024-07-25 18:27:21 +01:00
1b7f17d71d Set a password for ppetru. 2024-07-25 14:37:10 +01:00
199518ec57 Use hostname for zippy 2024-07-25 14:29:21 +01:00
baaf58f9d3 Update flake. 2024-07-25 14:28:37 +01:00
f13486e9a3 Add zippy to syncthing. 2024-07-25 14:27:39 +01:00
b8871fe7d1 Configure key file for each host, it varies. 2024-07-25 14:11:01 +01:00
9952890751 Make zippy a compute node. 2024-07-25 14:04:19 +01:00
7d63e71646 Flake configs for zippy. 2024-07-25 11:50:37 +01:00
838f6e981e Update stateVersion 2024-07-25 11:46:24 +01:00
79b53ebba0 Setup files for zippy. 2024-07-25 11:36:17 +01:00
473e93b4bb EVCC initial config. 2024-07-10 16:19:18 +01:00
ae3328d28b Update flake. 2024-07-06 18:15:44 +01:00
b058421e64 Update flake. 2024-07-02 08:41:34 +01:00
4dfd73037c Update flake deps and NixOS to 24.05 2024-06-12 13:31:20 +01:00
36 changed files with 686 additions and 119 deletions

common/base-node.nix (new file)

@@ -0,0 +1,13 @@
{ pkgs, ... }:
{
imports = [
./cifs-client.nix
./consul.nix
./glusterfs-client.nix
./impermanence.nix
./sshd.nix
./user-ppetru.nix
./unattended-encryption.nix
./systemd-boot.nix
];
}


@@ -1,16 +1,9 @@
{ pkgs, ... }:
{
imports = [
./cifs-client.nix
./consul.nix
./base-node.nix
./glusterfs.nix
./glusterfs-client.nix
./impermanence.nix
./nomad.nix
./sshd.nix
./user-ppetru.nix
./unattended-encryption.nix
./syncthing-data.nix
./systemd-boot.nix
];
}


@@ -12,7 +12,7 @@ in
client_addr = "0.0.0.0";
datacenter = "alo";
server = server_enabled;
bootstrap_expect = (builtins.length servers + 2) / 2;
bootstrap_expect = if server_enabled then (builtins.length servers + 2) / 2 else null;
retry_join = builtins.filter (elem: elem != config.networking.hostName) servers;
telemetry = {
prometheus_retention_time = "24h";


@@ -0,0 +1,93 @@
{ lib, config, ... }:
let
cfg = config.diskLayout;
in
{
options.diskLayout = {
mainDiskDevice = lib.mkOption {
type = lib.types.str;
description = "The device ID for the main disk";
};
keyDiskDevice = lib.mkOption {
type = lib.types.str;
description = "The device ID for the key disk";
};
};
config = {
disko.devices = {
disk.main = {
device = cfg.mainDiskDevice;
type = "disk";
content = {
type = "gpt";
partitions = {
esp = {
name = "ESP";
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
luksroot = {
end = "-8G";
content = {
type = "luks";
name = "luksroot";
settings = {
allowDiscards = true;
keyFile = cfg.keyDiskDevice;
keyFileSize = 4096;
};
content = {
type = "btrfs";
subvolumes = {
"root" = {
mountpoint = "/";
mountOptions = [
"compress=zstd"
"noatime"
];
};
"nix" = {
mountpoint = "/nix";
mountOptions = [
"compress=zstd"
"noatime"
];
};
"persist" = {
mountpoint = "/persist";
mountOptions = [
"compress=zstd"
"noatime"
];
};
"log" = {
mountpoint = "/var/log";
mountOptions = [
"compress=zstd"
"noatime"
];
};
};
};
};
};
swap = {
size = "8G";
content = {
type = "swap";
randomEncryption = true;
};
};
};
};
};
};
};
}


@@ -7,6 +7,7 @@
./network.nix
./nix.nix
./packages.nix
./show-changelog.nix
./sudo.nix
./tailscale.nix
];
@@ -21,5 +22,5 @@
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.05"; # Did you read the comment?
system.stateVersion = "24.05"; # Did you read the comment?
}


@@ -1,7 +1,8 @@
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
file
nodejs_20
vim
neovim
];
}


@@ -0,0 +1,10 @@
{ pkgs, config, lib, ... }:
{
system.activationScripts.show-update-changelog = ''
if [[ -e /run/current-system ]]; then
echo "[show-update-changelog] System Changelog"
${lib.getExe pkgs.nvd} --nix-bin-dir='${config.nix.package}/bin' diff /run/current-system "$systemConfig"
fi
'';
}


@@ -3,10 +3,10 @@
environment.systemPackages = [ pkgs.glusterfs ];
fileSystems."/data/compute" = {
device = "c1:/compute";
device = "192.168.1.71:/compute";
fsType = "glusterfs";
options = [
"backup-volfile-servers=c2:c3"
"backup-volfile-servers=192.168.1.72:192.168.1.73"
"_netdev"
];
};


@@ -6,6 +6,7 @@
environment.persistence."/persist" = {
directories = [
"/var/lib/nixos"
];
files = [
"/etc/machine-id"
@@ -33,41 +34,27 @@
# reset / at each boot
# Note `lib.mkBefore` is used instead of `lib.mkAfter` here.
boot.initrd.postDeviceCommands = pkgs.lib.mkBefore ''
mkdir -p /mnt
mkdir /mnt
mount /dev/mapper/luksroot /mnt
if [[ -e /mnt/root ]]; then
mkdir -p /mnt/old_roots
timestamp=$(date --date="@$(stat -c %Y /mnt/root)" "+%Y-%m-%-d_%H:%M:%S")
mv /mnt/root "/mnt/old_roots/$timestamp"
fi
# We first mount the btrfs root to /mnt
# so we can manipulate btrfs subvolumes.
mount -o subvol=/ /dev/disk/by-label/btrfs /mnt
delete_subvolume_recursively() {
IFS=$'\n'
for i in $(btrfs subvolume list -o "$1" | cut -f 9- -d ' '); do
delete_subvolume_recursively "/mnt/$i"
done
btrfs subvolume delete "$1"
}
# While we're tempted to just delete /root and create
# a new snapshot from /root-blank, /root is already
# populated at this point with a number of subvolumes,
# which makes `btrfs subvolume delete` fail.
# So, we remove them first.
#
# /root contains subvolumes:
# - /root/var/lib/portables
# - /root/var/lib/machines
#
# I suspect these are related to systemd-nspawn, but
# since I don't use it I'm not 100% sure.
# Anyhow, deleting these subvolumes hasn't resulted
# in any issues so far, except for fairly
# benign-looking errors from systemd-tmpfiles.
btrfs subvolume list -o /mnt/root |
cut -f9 -d' ' |
while read subvolume; do
echo "deleting /$subvolume subvolume..."
btrfs subvolume delete "/mnt/$subvolume"
done &&
echo "deleting /root subvolume..." &&
btrfs subvolume delete /mnt/root
for i in $(find /mnt/old_roots/ -maxdepth 1 -mtime +30); do
delete_subvolume_recursively "$i"
done
echo "restoring blank /root subvolume..."
btrfs subvolume snapshot /mnt/root-blank /mnt/root
# Once we're done rolling back to a blank snapshot,
# we can unmount /mnt and continue on the boot process.
btrfs subvolume create /mnt/root
umount /mnt
'';
}


@@ -4,6 +4,7 @@
# permissions. then, do it again with persistence enabled.
# This could list the owner user but I'm not sure if it's already created at
# the time impermanence setup runs.
# Note: chown syncthing:syncthing /data/sync && chmod 700 /data/sync also seems to work
environment.persistence."/persist".directories = [
"/data/sync"
];
@@ -19,7 +20,8 @@
devices = {
"c1" = { id = "53JGRHQ-VGBYIGH-7IT6Z5S-3IMRY2I-LJZAE3B-QUDH3QF-4F4QKVC-VBWPJQ4"; };
"c2" = { id = "Z3D476N-PUV6WAD-DSJWVBO-TWEOD4I-KDDMNRB-QEBOP6T-BYPGYTX-RAAYGAW"; };
"c3" = { id = "JUQZO6M-65VXR5Y-ZDWY66F-F2EEMQC-HKMD6EB-33TAGPY-ADCB2OX-QT6IAAQ"; };
"c3" = { id = "D3C3YII-A3QGUNF-LHOGZNX-GJ4ZF3X-VVLMNY5-BBKF3BO-KNHKJMD-EA5QYQJ"; };
"zippy" = { id = "WXDYZWN-JG2OBQH-CC42RMM-LPJGTS6-Y2BV37J-TYSLHL4-VHGYL5M-URI42QJ"; };
};
folders = {
"wordpress" = {
@@ -28,6 +30,7 @@
"c1"
"c2"
"c3"
"zippy"
];
ignorePerms = false;
versioning = {


@@ -4,6 +4,5 @@
allowDiscards = true;
bypassWorkqueues = true;
keyFileSize = 4096;
keyFile = "/dev/sda";
};
}


@@ -1,4 +1,6 @@
{ pkgs, ... }:
{
programs.zsh.enable = true;
users.users.ppetru = {
isNormalUser = true;
extraGroups = [
@@ -6,6 +8,14 @@
"wheel"
];
openssh.authorizedKeys.keys = [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCdZ9dHN+DamoyRAIS8v7Ph85KyJ9zYdgwoqkp7F+smEJEdDKboHE5LA49IDQk4cgkR5xNEMtxANpJm+AXNAhQOPVl/w57vI/Z+TBtSvDoj8LuAvKjmmrPfok2iyD2IIlbctcw8ypn1revZwDb1rBFefpbbZdr5h+75tVqqmNebzxk6UQsfL++lU8HscWwYKzxrrom5aJL6wxNTfy7/Htkt4FHzoKAc5gcB2KM/q0s6NvZzX9WtdHHwAR1kib2EekssjDM9VLecX75Xhtbp+LrHOJKRnxbIanXos4UZUzaJctdNTcOYzEVLvV0BCYaktbI+uVvJcC0qo28bXbHdS3rTGRu8CsykFneJXnrrRIJw7mYWhJSTV9bf+6j/lnFNAurbiYmd4SzaTgbGjj2j38Gr/CTsyv8Rho7P3QUWbRRZnn4a7eVPtjGagqwIwS59YDxRcOy2Wdsw35ry/N2G802V7Cr3hUqeaAIev2adtn4FaG72C8enacYUeACPEhi7TYdsDzuuyt31W7AQa5Te4Uda20rTa0Y9N5Lw85uGB2ebbdYWlO2CqI/m+xNYcPkKqL7zZILz782jDw1sxWd/RUbEgJNrWjsKZ7ybiEMmhpw5vLiMGOeqQWIT6cBCNjocmW0ocU+FBLhhioyrvuZOyacoEZLoklatsL0DMkvvkbT0Ew== petru@paler.net" ];
shell = pkgs.zsh;
hashedPassword = "$y$j9T$RStwCKefSqHTIiRo6u6Q50$Pp2dNUeJeUMH0HJdDoM/vXMQa2jqyTTPvvIzACHZhVB";
openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCdZ9dHN+DamoyRAIS8v7Ph85KyJ9zYdgwoqkp7F+smEJEdDKboHE5LA49IDQk4cgkR5xNEMtxANpJm+AXNAhQOPVl/w57vI/Z+TBtSvDoj8LuAvKjmmrPfok2iyD2IIlbctcw8ypn1revZwDb1rBFefpbbZdr5h+75tVqqmNebzxk6UQsfL++lU8HscWwYKzxrrom5aJL6wxNTfy7/Htkt4FHzoKAc5gcB2KM/q0s6NvZzX9WtdHHwAR1kib2EekssjDM9VLecX75Xhtbp+LrHOJKRnxbIanXos4UZUzaJctdNTcOYzEVLvV0BCYaktbI+uVvJcC0qo28bXbHdS3rTGRu8CsykFneJXnrrRIJw7mYWhJSTV9bf+6j/lnFNAurbiYmd4SzaTgbGjj2j38Gr/CTsyv8Rho7P3QUWbRRZnn4a7eVPtjGagqwIwS59YDxRcOy2Wdsw35ry/N2G802V7Cr3hUqeaAIev2adtn4FaG72C8enacYUeACPEhi7TYdsDzuuyt31W7AQa5Te4Uda20rTa0Y9N5Lw85uGB2ebbdYWlO2CqI/m+xNYcPkKqL7zZILz782jDw1sxWd/RUbEgJNrWjsKZ7ybiEMmhpw5vLiMGOeqQWIT6cBCNjocmW0ocU+FBLhhioyrvuZOyacoEZLoklatsL0DMkvvkbT0Ew== petru@paler.net"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+QbeQG/gTPJ2sIMPgZ3ZPEirVo5qX/carbZMKt50YN petru@happy"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDZjL47pUIks2caErnbFYv+McJcWd+GSydzAXHZEtL8s JuiceSSH"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINqULSU2VWUXSrHzFhs9pdXWZPtP/RS9gx7zz/zD/GDG petru@Workshop"
];
};
}

flake.lock (generated)

@@ -9,11 +9,11 @@
"utils": "utils"
},
"locked": {
"lastModified": 1711973905,
"narHash": "sha256-UFKME/N1pbUtn+2Aqnk+agUt8CekbpuqwzljivfIme8=",
"lastModified": 1718194053,
"narHash": "sha256-FaGrf7qwZ99ehPJCAwgvNY5sLCqQ3GDiE/6uLhxxwSY=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "88b3059b020da69cbe16526b8d639bd5e0b51c8b",
"rev": "3867348fa92bc892eba5d9ddb2d7a97b9e127a8a",
"type": "github"
},
"original": {
@@ -22,6 +22,26 @@
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1724895876,
"narHash": "sha256-GSqAwa00+vRuHbq9O/yRv7Ov7W/pcMLis3HmeHv8a+Q=",
"owner": "nix-community",
"repo": "disko",
"rev": "511388d837178979de66d14ca4a2ebd5f7991cd3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
@@ -38,13 +58,35 @@
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"ppetru-home",
"nixpkgs"
]
},
"locked": {
"lastModified": 1720042825,
"narHash": "sha256-A0vrUB6x82/jvf17qPCpxaM+ulJnD8YZwH9Ci0BsAzE=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "e1391fb22e18a36f57e6999c7a9f966dc80ac073",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "release-24.05",
"repo": "home-manager",
"type": "github"
}
},
"impermanence": {
"locked": {
"lastModified": 1708968331,
"narHash": "sha256-VUXLaPusCBvwM3zhGbRIJVeYluh2uWuqtj4WirQ1L9Y=",
"lastModified": 1724489415,
"narHash": "sha256-ey8vhwY/6XCKoh7fyTn3aIQs7WeYSYtLbYEG87VCzX4=",
"owner": "nix-community",
"repo": "impermanence",
"rev": "a33ef102a02ce77d3e39c25197664b7a636f9c30",
"rev": "c7f5b394397398c023000cf843986ee2571a1fd7",
"type": "github"
},
"original": {
@@ -55,27 +97,27 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1712310679,
"narHash": "sha256-XgC/a/giEeNkhme/AV1ToipoZ/IVm1MV2ntiK4Tm+pw=",
"lastModified": 1724855419,
"narHash": "sha256-WXHSyOF4nBX0cvHN3DfmEMcLOVdKH6tnMk9FQ8wTNRc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "72da83d9515b43550436891f538ff41d68eecc7f",
"rev": "ae2fc9e0e42caaf3f068c1bfdc11c71734125e06",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1712163089,
"narHash": "sha256-Um+8kTIrC19vD4/lUCN9/cU9kcOsD1O1m+axJqQPyMM=",
"lastModified": 1724819573,
"narHash": "sha256-GnR7/ibgIH1vhoy8cYdmXE6iyZqKqFxQSVkFgosBh6w=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "fd281bd6b7d3e32ddfa399853946f782553163b5",
"rev": "71e91c409d1e654808b2621f28a327acfdad8dc2",
"type": "github"
},
"original": {
@@ -85,12 +127,35 @@
"type": "github"
}
},
"ppetru-home": {
"inputs": {
"home-manager": "home-manager",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1726070115,
"narHash": "sha256-vWwTNxTMpK9d0xtZv3n0BWdMuHYF1kQkdN3cJmErU+A=",
"ref": "refs/heads/master",
"rev": "31703b82124c15b7be8f094c85576b6c4edc4db4",
"revCount": 5,
"type": "git",
"url": "file:/home/ppetru/ppetru-home"
},
"original": {
"type": "git",
"url": "file:/home/ppetru/ppetru-home"
}
},
"root": {
"inputs": {
"deploy-rs": "deploy-rs",
"disko": "disko",
"impermanence": "impermanence",
"nixpkgs": "nixpkgs",
"nixpkgs-unstable": "nixpkgs-unstable"
"nixpkgs-unstable": "nixpkgs-unstable",
"ppetru-home": "ppetru-home"
}
},
"systems": {


@@ -5,11 +5,17 @@
deploy-rs.url = "github:serokell/deploy-rs";
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
impermanence.url = "github:nix-community/impermanence";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05";
nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixos-unstable";
disko.url = "github:nix-community/disko";
disko.inputs.nixpkgs.follows = "nixpkgs";
ppetru-home = {
url = "git+file:/home/ppetru/ppetru-home";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, nixpkgs-unstable, deploy-rs, ... }@inputs:
outputs = { self, nixpkgs, nixpkgs-unstable, deploy-rs, disko, ppetru-home, ... }@inputs:
let
inherit (self);
@@ -24,9 +30,29 @@
nixpkgs.overlays = [ overlay-unstable ];
nixpkgs.config.allowUnfree = true;
})
disko.nixosModules.disko
] ++ modules;
specialArgs = { inherit inputs self; };
};
pkgsFor = system: import nixpkgs {
inherit system;
overlays = [ overlay-unstable ];
};
deployPkgsFor = system: import nixpkgs {
inherit system;
overlays = [
overlay-unstable
deploy-rs.overlay
(self: super: {
deploy-rs = {
inherit (pkgsFor system) deploy-rs;
lib = super.deploy-rs.lib;
};
})
];
};
in {
nixosConfigurations = {
c1 = mkNixos "x86_64-linux" [ ./hosts/c1 ];
@@ -34,6 +60,8 @@
c3 = mkNixos "x86_64-linux" [ ./hosts/c3 ];
nix-dev = mkNixos "x86_64-linux" [ ./hosts/nix-dev ];
alo-cloud-1 = mkNixos "aarch64-linux" [./hosts/alo-cloud-1 ];
zippy = mkNixos "x86_64-linux" [ ./hosts/zippy ];
chilly = mkNixos "x86_64-linux" [ ./hosts/chilly ];
};
deploy = {
@@ -42,35 +70,69 @@
hostname = "c1";
profiles.system = {
user = "root";
path = deploy-rs.lib.x86_64-linux.activate.nixos self.nixosConfigurations.c1;
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.c1;
};
};
c2 = {
hostname = "c2";
profiles.system = {
user = "root";
path = deploy-rs.lib.x86_64-linux.activate.nixos self.nixosConfigurations.c2;
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.c2;
};
};
c3 = {
hostname = "c3";
profiles.system = {
user = "root";
path = deploy-rs.lib.x86_64-linux.activate.nixos self.nixosConfigurations.c3;
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.c3;
};
};
nix-dev = {
hostname = "nix-dev";
profiles.system = {
user = "root";
path = deploy-rs.lib.x86_64-linux.activate.nixos self.nixosConfigurations.nix-dev;
profiles = {
system = {
user = "root";
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.nix-dev;
};
ppetru = {
user = "ppetru";
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.home-manager ppetru-home.homeConfigurations.ppetru;
};
};
};
alo-cloud-1 = {
hostname = "49.13.163.72";
profiles.system = {
user = "root";
path = deploy-rs.lib.aarch64-linux.activate.nixos self.nixosConfigurations.alo-cloud-1;
profiles = {
system = {
user = "root";
path = (deployPkgsFor "aarch64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.alo-cloud-1;
};
};
};
zippy = {
hostname = "zippy";
profiles = {
system = {
user = "root";
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.zippy;
};
ppetru = {
user = "ppetru";
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.home-manager ppetru-home.homeConfigurations.ppetru;
};
};
};
chilly = {
hostname = "chilly";
profiles = {
system = {
user = "root";
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.chilly;
};
ppetru = {
user = "ppetru";
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.home-manager ppetru-home.homeConfigurations.ppetru;
};
};
};
};


@@ -7,5 +7,6 @@
];
networking.hostName = "c1";
boot.initrd.luks.devices."luksroot".keyFile = "/dev/sda";
services.tailscaleAutoconnect.authkey = "tskey-auth-kmFvBT3CNTRL-wUbELKSd5yhuuTwTcgJZxhPUTxKgcYKF";
}


@@ -7,5 +7,6 @@
];
networking.hostName = "c2";
boot.initrd.luks.devices."luksroot".keyFile = "/dev/sda";
services.tailscaleAutoconnect.authkey = "tskey-auth-kbYnZK2CNTRL-SpUVCuzS6P3ApJiDaB6RM3M4b8M9TXgS";
}


@@ -3,9 +3,16 @@
imports = [
../../common/global
../../common/compute-node.nix
../../common/encrypted-btrfs-layout.nix
./hardware.nix
];
diskLayout = {
mainDiskDevice = "/dev/disk/by-id/nvme-INTENSO_SSD_1782403017002453";
#keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777650050-0:0";
keyDiskDevice = "/dev/sda";
};
networking.hostName = "c3";
services.tailscaleAutoconnect.authkey = "tskey-auth-kDNknU5CNTRL-iEGHyo8GDZBCVLaMutJjZBHH7wCuCDyFb";
services.tailscaleAutoconnect.authkey = "tskey-auth-kReCuA7E8M11CNTRL-DBGsB3YLBvQwCdWcviQQtQ1zV23pQpQP4";
}


@@ -10,42 +10,6 @@
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/2b96989f-cec5-40bd-afa8-e5ca0b8f30e9";
fsType = "btrfs";
options = [ "subvol=root" ];
};
boot.initrd.luks.devices."luksroot".device = "/dev/disk/by-uuid/c4a43a18-b480-43ed-87f2-f78a50f03976";
fileSystems."/nix" =
{ device = "/dev/disk/by-uuid/2b96989f-cec5-40bd-afa8-e5ca0b8f30e9";
fsType = "btrfs";
options = [ "subvol=nix" ];
};
fileSystems."/persist" =
{ device = "/dev/disk/by-uuid/2b96989f-cec5-40bd-afa8-e5ca0b8f30e9";
fsType = "btrfs";
options = [ "subvol=persist" ];
};
fileSystems."/var/log" =
{ device = "/dev/disk/by-uuid/2b96989f-cec5-40bd-afa8-e5ca0b8f30e9";
fsType = "btrfs";
options = [ "subvol=log" ];
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/3051-C478";
fsType = "vfat";
};
swapDevices = pkgs.lib.mkForce [ {
device = "/dev/disk/by-id/nvme-eui.000000000000001000080d020035fefd-part2";
randomEncryption.enable = true;
}];
nixpkgs.hostPlatform = "x86_64-linux";
hardware.cpu.intel.updateMicrocode = true;
}

hosts/c3/key.bin (binary file, content not shown)

hosts/chilly/default.nix (new file)

@@ -0,0 +1,19 @@
{ lib, pkgs, inputs, ... }:
{
imports = [
../../common/global
../../common/base-node.nix
../../common/encrypted-btrfs-layout.nix
./hardware.nix
];
diskLayout = {
mainDiskDevice = "/dev/disk/by-id/ata-FORESEE_512GB_SSD_MP15B03900928";
#keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777660586-0:0";
keyDiskDevice = "/dev/sdb";
};
networking.hostName = "chilly";
services.tailscaleAutoconnect.authkey = "tskey-auth-kRXS9oPyPm11CNTRL-BE6YnbP9J6ZZuV9dHkX17ZMnm1JGdu93";
services.consul.interface.advertise = lib.mkForce "enp1s0";
}

hosts/chilly/hardware.nix (new file)

@@ -0,0 +1,16 @@
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
nixpkgs.hostPlatform = "x86_64-linux";
hardware.cpu.intel.updateMicrocode = true;
}

hosts/chilly/key.bin (binary file, content not shown)

hosts/zippy/default.nix (new file)

@@ -0,0 +1,12 @@
{ pkgs, inputs, ... }:
{
imports = [
../../common/global
../../common/compute-node.nix
./hardware.nix
];
networking.hostName = "zippy";
boot.initrd.luks.devices."luksroot".keyFile = "/dev/sdb";
services.tailscaleAutoconnect.authkey = "tskey-auth-ktKyQ59f2p11CNTRL-ut8E71dLWPXsVtb92hevNX9RTjmk4owBf";
}

hosts/zippy/hardware.nix (new file)

@@ -0,0 +1,53 @@
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/e009eed5-90cd-4756-b56f-149d876ca934";
fsType = "btrfs";
options = [ "subvol=root" ];
};
boot.initrd.luks.devices."luksroot".device = "/dev/disk/by-uuid/3fb5eb41-34d3-4ef6-8f3e-5db5a788ceef";
fileSystems."/nix" =
{ device = "/dev/disk/by-uuid/e009eed5-90cd-4756-b56f-149d876ca934";
fsType = "btrfs";
options = [ "subvol=nix" ];
};
fileSystems."/persist" =
{ device = "/dev/disk/by-uuid/e009eed5-90cd-4756-b56f-149d876ca934";
fsType = "btrfs";
options = [ "subvol=persist" ];
};
fileSystems."/var/log" =
{ device = "/dev/disk/by-uuid/e009eed5-90cd-4756-b56f-149d876ca934";
fsType = "btrfs";
options = [ "subvol=log" ];
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/F3C9-A38F";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices = [ {
device = "/dev/disk/by-id/ata-KINGSTON_SKC600MS1024G_50026B7785AE0A92-part2";
randomEncryption.enable = true;
}];
nixpkgs.hostPlatform = "x86_64-linux";
hardware.cpu.intel.updateMicrocode = true;
}


@@ -10,7 +10,7 @@ in
./hardware-configuration.nix
];
networking.hostName = "c1";
networking.hostName = "zippy";
nix.settings.experimental-features = [ "nix-command" "flakes" ];
nix.settings.trusted-users = [ "root" "@wheel" ];
@@ -20,11 +20,11 @@ in
allowDiscards = true;
bypassWorkqueues = true;
keyFileSize = 4096;
keyFile = "/dev/sda";
keyFile = "/dev/sdb";
};
swapDevices = pkgs.lib.mkForce [ {
device = "/dev/disk/by-id/nvme-eui.002538b981b03d98-part2";
device = "/dev/disk/by-id/ata-KINGSTON_SKC600MS1024G_50026B7785AE0A92-part2";
randomEncryption.enable = true;
}];
@@ -134,7 +134,7 @@ in
};
networking.firewall = {
enable = true;
enable = false;
allowedTCPPorts = [ 22 ];
allowedUDPPorts = [ ];
};
@@ -150,6 +150,6 @@ in
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.05"; # Did you read the comment?
system.stateVersion = "24.05"; # Did you read the comment?
}

nixos-setup/keys/zippy.key (binary file, content not shown)

nixos-setup/zippy-setup.sh (new executable file)

@@ -0,0 +1,56 @@
DISK=/dev/disk/by-id/ata-KINGSTON_SKC600MS1024G_50026B7785AE0A92
KEY_DISK=/dev/disk/by-id/usb-Intenso_Micro_Line_22080777660702-0:0
parted "$DISK" -- mklabel gpt
parted -a optimal "$DISK" -- mkpart primary 512MiB -8GB
udevadm trigger
sleep 1
cryptsetup -v --keyfile-size 4096 luksFormat "$DISK"-part1 $KEY_DISK
cryptsetup open --key-file $KEY_DISK --keyfile-size 4096 "$DISK"-part1 luksroot
mkfs.btrfs -f -L btrfs /dev/mapper/luksroot
parted -a optimal "$DISK" -- mkpart primary linux-swap -8GB 100%
parted -a optimal "$DISK" -- mkpart ESP fat32 1MB 512MiB
parted "$DISK" -- set 3 esp on
udevadm trigger
sleep 1
mkfs.vfat "$DISK"-part3
mount /dev/mapper/luksroot /mnt
btrfs subvolume create /mnt/root
btrfs subvolume create /mnt/nix
btrfs subvolume create /mnt/persist
btrfs subvolume create /mnt/log
btrfs subvolume snapshot -r /mnt/root /mnt/root-blank
umount /mnt
mount -o subvol=root,compress=zstd,noatime /dev/mapper/luksroot /mnt
mkdir /mnt/nix
mount -o subvol=nix,compress=zstd,noatime /dev/mapper/luksroot /mnt/nix
mkdir /mnt/persist
mount -o subvol=persist,compress=zstd,noatime /dev/mapper/luksroot /mnt/persist
mkdir -p /mnt/var/log
mount -o subvol=log,compress=zstd,noatime /dev/mapper/luksroot /mnt/var/log
mkdir /mnt/boot
mount "$DISK"-part3 /mnt/boot
nixos-generate-config --root /mnt
# only enable here so that it doesn't get included in hardware-configuration.nix
mkswap -L swap "$DISK"-part2
swapon "$DISK"-part2
cp configuration.nix /mnt/etc/nixos
nixos-install
cp /mnt/etc/nixos/* /mnt/persist/etc/nixos
echo "done!"


@@ -114,5 +114,5 @@ variable "secret_key" {
variable "authentik_version" {
type = string
default = "2023.10.6"
default = "2024.8.1"
}


@@ -66,6 +66,10 @@ data = <<EOH
EOH
destination = "local/clickhouse-user-config.xml"
}
resources {
memory = 1000
}
}
}
}

services/evcc.hcl (new file)

@@ -0,0 +1,46 @@
job "evcc" {
datacenters = ["alo"]
meta {
uuid = uuidv4()
}
group "sys" {
network {
port "ui" { to = 7070 }
port "p8887" { static = 8887 }
port "p7090" { static = 7090 }
port "p9522" { static = 9522 }
}
task "server" {
driver = "docker"
config {
image = "evcc/evcc:latest"
ports = [
"ui",
"p8887",
"p7090",
"p9522",
]
volumes = [
"/data/compute/appdata/evcc/evcc.yaml:/etc/evcc.yaml",
"/data/compute/appdata/evcc/evcc:/root/.evcc",
]
}
service {
name = "evcc"
port = "ui"
tags = [
"traefik.enable=true",
"traefik.http.routers.evcc.entryPoints=websecure",
"traefik.http.routers.evcc.middlewares=authentik@file",
]
}
}
}
}


@@ -24,7 +24,7 @@ job "instasync" {
driver = "exec"
config {
command = "/nix/store/qs7j7r5jcvn6ijvdccjnnc7qjqpfgd0l-nodejs-20.11.1/bin/node"
command = "/nix/store/9cwyl546mzksfbvk6gdpjddc7z7m6ixv-nodejs-20.15.1/bin/node"
args = ["/code/instasync/sync.js"]
}

services/maps.hcl (new file)

@@ -0,0 +1,117 @@
job "maps" {
datacenters = ["alo"]
# force each evaluation to be different, so that the jobspec changes, so that the latest image is pulled
# otherwise, nomad run ends up not doing anything even if the latest image is different
meta {
uuid = uuidv4()
}
group "qgis" {
network {
port "http" {
to = 8080
}
}
task "py-server" {
driver = "docker"
config {
image = "3liz/qgis-map-server:3.38"
ports = ["http"]
volumes = [
"/data/shared/alo_gis:/alo_gis",
"/data/compute/appdata/maps/qgis-server-plugins:/plugins",
]
}
env {
QGSRV_API_ENABLED_LIZMAP = "yes"
QGSRV_API_ENDPOINTS_LIZMAP = "/ows/lizmap"
QGSRV_API_ENABLED_LANDING_PAGE = "yes"
QGIS_SERVER_LIZMAP_REVEAL_SETTINGS = "yes"
QGSRV_SERVER_WORKERS = 4
QGSRV_LOGGING_LEVEL = "INFO"
QGSRV_SERVER_HTTP_PROXY = "yes"
#QGSRV_SERVER_PROXY_URL = "https://mapserver.v.paler.net"
QGSRV_SERVER_PLUGINPATH = "/plugins"
QGSRV_CACHE_ROOTDIR = "/alo_gis"
QGSRV_CACHE_STRICT_CHECK = "no"
QGSRV_TRUST_LAYER_METADATA = "yes"
QGSRV_DISABLE_GETPRINT = "yes"
}
service {
name = "mapserver"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.mapserver.entryPoints=websecure",
#"traefik.http.routers.mapserver.middlewares=authentik@file",
]
}
resources {
memory = 3000
cpu = 4000
}
}
}
# group "lizmap" {
# network {
# port "http" {
# to = 8080
# }
# }
#
# task "server" {
# driver = "docker"
#
# config {
# image = "3liz/lizmap-web-client:3.8.0-rc.4"
# ports = ["http"]
# volumes = [
# "/data/shared/alo_gis:/srv/projects",
# "/data/compute/appdata/maps/var/lizmap-theme-config:/www/lizmap/var/lizmap-theme-config",
# "/data/compute/appdata/maps/var/lizmap-config:/www/lizmap/var/config",
# "/data/compute/appdata/maps/var/lizmap-db:/www/lizmap/var/db",
# "/data/compute/appdata/maps/var/lizmap-log:/www/lizmap/var/log",
# "/data/compute/appdata/maps/var/lizmap-modules:/www/lizmap/var/lizmap-modules",
# "/data/compute/appdata/maps/var/lizmap-my-packages:/www/lizmap/var/my-packages",
# "/data/compute/appdata/maps/www:/www/lizmap/www",
# "/data/compute/appdata/maps/etc:/srv/etc:ro",
# ]
#
# command = "php-fpm"
# }
#
# env {
# LIZMAP_CACHEREDISDB = "1"
# LIZMAP_CACHEREDISHOST = "redis.service.consul"
# LIZMAP_CACHESTORAGETYPE = "redis"
# LIZMAP_HOME = "/srv/lizmap"
# LIZMAP_WMSSERVERURL = "https://mapserver.v.paler.net/ows/"
# LIZMAP_CONFIG_INCLUDE = "/srv/etc"
# }
#
# service {
# name = "lizmap"
# port = "http"
#
# tags = [
# "traefik.enable=true",
# "traefik.http.routers.lizmap.entryPoints=websecure",
# "traefik.http.routers.lizmap.middlewares=authentik@file",
# ]
# }
#
# resources {
# memory = 2000
# cpu = 1000
# }
# }
# }
}


@@ -18,7 +18,7 @@ job "mysql" {
driver = "docker"
config {
image = "mysql:8.4"
image = "mysql:9.0"
args = [
# 300M, up from default of 100M
"--innodb-redo-log-capacity=314572800",
@@ -46,7 +46,7 @@ job "mysql" {
resources {
cpu = 3000
memory = 1500
memory = 4500
}
}


@@ -69,6 +69,19 @@ job "wordpress" {
"traefik.http.routers.wordpress.rule=Host(`wordpress.paler.net`) || Host(`ines.paler.net`) || Host(`coachingfor.me`) || Host(`coachingfor.work`) || Host(`petru.ines.paler.net`) || Host(`liam.paler.net`) || Host(`tomas.paler.net`) || Host(`musictogethersilvercoast.pt`)",
"traefik.http.routers.wordpress.middlewares=compress@file",
]
check {
type = "http"
port = "http"
path = "/wp-admin/install.php"
interval = "5s"
timeout = "2s"
check_restart {
limit = 3
grace = "60s"
}
}
}
}
}

setup-host.txt (new file)

@@ -0,0 +1,11 @@
* boot target from NixOS installer USB
* passwd for nixos user on target
* note IP address, test that ssh as nixos works
* on target: nixos-generate-config --no-filesystems, copy to base host
* on target: dd if=/dev/random of=/dev/disk/by-id/<usb drive for encryption key> bs=4096 count=1
* on target: dd if=/dev/disk/by-id/<usb drive for encryption key> of=key.bin bs=4096 count=1
* copy key.bin to hosts/<target>/
* use the generated config to create new config in hosts/<target>
* set the actual device IDs in hosts/<target>/default.nix
* on base host: nix run github:nix-community/nixos-anywhere -- --flake '.#<target>' nixos@<target IP>
* after confirmed working, update hosts/<target>/default.nix to set keyFile to /dev/sdX (otherwise when the USB drive fails it's harder to replace)
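For convenience, the key-disk steps above can be strung together roughly as follows. This is only a sketch: KEY_USB and the usb-EXAMPLE-KEY-DRIVE path are hypothetical placeholders for the actual USB key drive, and the <target> placeholders are kept from the list above.

# Sketch of the key-disk preparation and install steps listed above (not a tested script).
KEY_USB=/dev/disk/by-id/usb-EXAMPLE-KEY-DRIVE-0:0   # hypothetical path of the USB drive holding the key
# Fill the first 4096 bytes of the key drive with random data...
dd if=/dev/random of="$KEY_USB" bs=4096 count=1
# ...then read the same 4096 bytes back into key.bin, to be copied to hosts/<target>/.
dd if="$KEY_USB" of=key.bin bs=4096 count=1
# From the base host, install the target over SSH with nixos-anywhere.
nix run github:nix-community/nixos-anywhere -- --flake '.#<target>' nixos@<target IP>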


@@ -11,3 +11,13 @@ mysql credentials
postgres credentials
* Put secrets/postgres_password into a Nomad var named secrets/postgresql.postgres_password
adding a new gluster node to the compute volume, with c3 having failed:
(instructions from https://icicimov.github.io/blog/high-availability/Replacing-GlusterFS-failed-node/)
* zippy: sudo mkdir /persist/glusterfs/compute -p
* c1: gluster peer probe 192.168.1.2 (by IP because zippy resolved to a tailscale address)
* c1: gluster volume replace-brick compute c3:/persist/glusterfs/compute/brick1 192.168.1.2:/persist/glusterfs/compute/brick1 commit force
* c1: gluster volume heal compute full
* c1: gluster peer detach c3
same to then later replace 192.168.1.2 with 192.168.1.73
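The replacement steps above could also be collected into one small script. This is a sketch only: the IP, node name, and brick path are taken from the example above and would need adjusting, and the mkdir step is run on the new node via ssh rather than locally.

# Sketch of the GlusterFS failed-node replacement described above; run from a surviving peer (c1).
NEW_IP=192.168.1.2                  # new node (zippy), reachable by LAN IP rather than hostname
OLD_NODE=c3                         # failed node being replaced
BRICK=/persist/glusterfs/compute/brick1

# Create the brick directory on the new node.
ssh "$NEW_IP" sudo mkdir -p /persist/glusterfs/compute
# Probe the new peer, move the brick over, trigger a full heal, then drop the failed peer.
gluster peer probe "$NEW_IP"
gluster volume replace-brick compute "$OLD_NODE:$BRICK" "$NEW_IP:$BRICK" commit force
gluster volume heal compute full
gluster peer detach "$OLD_NODE"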