Move common Nix code one level up.

This commit is contained in:
2023-11-26 09:36:17 +00:00
parent dfd472ebe8
commit 361dd002d5
26 changed files with 8 additions and 8 deletions

View File

@@ -1,8 +1,8 @@
{ pkgs, inputs, ... }:
{
imports = [
../common/global
../common/compute-node.nix
../../common/global
../../common/compute-node.nix
./hardware.nix
];

View File

@@ -1,8 +1,8 @@
{ pkgs, inputs, ... }:
{
imports = [
../common/global
../common/compute-node.nix
../../common/global
../../common/compute-node.nix
./hardware.nix
];

View File

@@ -1,8 +1,8 @@
{ pkgs, inputs, ... }:
{
imports = [
../common/global
../common/compute-node.nix
../../common/global
../../common/compute-node.nix
./hardware.nix
];

View File

@@ -1,30 +0,0 @@
{ pkgs, ... }:
let
  # Shared mount options for all CIFS shares; the systemd automount +
  # short timeouts prevent boot/shutdown hangs when the network splits.
  automountOpts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s";
  # Build a CIFS mount for a share exported by the "fractal" host.
  mkFractalMount = share: {
    device = "//fractal/${share}";
    fsType = "cifs";
    options = [ "${automountOpts},credentials=/etc/nixos/smb-secrets" ];
  };
in
{
  environment.systemPackages = [ pkgs.cifs-utils ];
  # SECURITY(review): this plaintext SMB password is committed to the repo,
  # and environment.etc copies it into the world-readable /nix/store (the
  # 0400 mode only protects the /etc symlink target). Rotate the credential
  # and move it to a secrets manager (e.g. agenix/sops-nix).
  environment.etc."nixos/smb-secrets" = {
    text = ''
      username=compute
      password=aaShecheseiwee5eeh2p
    '';
    mode = "0400";
  };
  fileSystems."/data/media" = mkFractalMount "media";
  fileSystems."/data/shared" = mkFractalMount "shared";
}

View File

@@ -1,15 +0,0 @@
# Aggregate profile for compute nodes: cluster storage (CIFS, GlusterFS),
# service discovery (Consul), workload orchestration (Nomad), ephemeral
# root (impermanence), SSH access, admin user, unattended LUKS unlock and
# systemd-boot.
# NOTE(review): `pkgs` is not used in this module — could be `{ ... }:`.
{ pkgs, ... }:
{
imports = [
./cifs-client.nix
./consul.nix
./glusterfs.nix
./glusterfs-client.nix
./impermanence.nix
./nomad.nix
./sshd.nix
./user-ppetru.nix
./unattended-encryption.nix
./systemd-boot.nix
];
}

View File

@@ -1,32 +0,0 @@
# Consul agent: every node runs a client; the hosts listed in `servers`
# additionally run in server mode. (Unused `pkgs` module argument removed.)
{ config, ... }:
let
  # Hosts that form the Consul server quorum.
  servers = [ "c1" "c2" "c3" ];
  # True when this host is one of the servers.
  server_enabled = builtins.elem config.networking.hostName servers;
in
{
  services.consul = {
    enable = true;
    webUi = true;
    interface.advertise = "eno1";
    extraConfig = {
      # Serve the client API (HTTP/DNS) on all interfaces.
      client_addr = "0.0.0.0";
      datacenter = "alo";
      server = server_enabled;
      # Integer majority quorum: (n + 2) / 2 == floor(n/2) + 1 for all n.
      bootstrap_expect = (builtins.length servers + 2) / 2;
      # Join every server except ourselves.
      retry_join = builtins.filter (elem: elem != config.networking.hostName) servers;
      telemetry = {
        prometheus_retention_time = "24h";
        disable_hostname = true;
      };
    };
  };
  # Keep Raft/KV state across impermanence root rollbacks.
  environment.persistence."/persist".directories = [
    "/var/lib/consul"
  ];
  # Consul ports: DNS 8600, HTTP 8500, LAN/WAN serf 8301/8302, RPC 8300.
  networking.firewall = {
    allowedTCPPorts = [ 8600 8500 8301 8302 8300 ];
    allowedUDPPorts = [ 8600 8301 8302 ];
  };
}

View File

@@ -1,3 +0,0 @@
{
  # Let the kernel scale CPU frequency with load ("ondemand" governor).
  powerManagement = {
    cpuFreqGovernor = "ondemand";
  };
}

View File

@@ -1,25 +0,0 @@
# Baseline configuration shared by every host: boot, nix settings, locale,
# networking, base packages, sudo and tailscale. `self` is the flake itself
# (passed via specialArgs) and is used for the configuration revision below.
{ pkgs, self, ... }:
{
imports = [
./cpufreq.nix
./flakes.nix
./locale.nix
./network.nix
./nix.nix
./packages.nix
./sudo.nix
./tailscale.nix
];
system.copySystemConfiguration = false; # not supported with flakes
# Let 'nixos-version --json' know about the Git revision of the flake
# (`self ? rev` is false for a dirty working tree, so the revision is only
# recorded when building from a clean commit).
system.configurationRevision = pkgs.lib.mkIf (self ? rev) self.rev;
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.05"; # Did you read the comment?
}

View File

@@ -1,3 +0,0 @@
{
  # Enable flakes and the unified `nix` CLI on every host.
  nix = {
    settings.experimental-features = [ "nix-command" "flakes" ];
  };
}

View File

@@ -1,3 +0,0 @@
# All hosts share a single timezone; other locale settings keep defaults.
{
time.timeZone = "Europe/Lisbon";
}

View File

@@ -1,17 +0,0 @@
# Cluster-wide networking: DHCP addressing plus static name resolution for
# the three cluster nodes.
{
networking = {
useDHCP = true;
# NOTE(review): the firewall is disabled globally here, yet sibling
# modules (sshd, consul, nomad, glusterfs) carefully open specific ports —
# those allow-lists only matter if this is flipped back on. Confirm which
# behaviour is intended.
firewall.enable = false;
extraHosts = ''
192.168.1.71 c1
192.168.1.72 c2
192.168.1.73 c3
'';
};
# Keep DHCP lease state across impermanence root rollbacks.
environment.persistence."/persist" = {
directories = [
"/var/db/dhcpcd"
];
};
}

View File

@@ -1,3 +0,0 @@
{
# Allow wheel users to talk to the nix daemon with elevated rights.
# NOTE(review): trusted-users can import arbitrary store paths and change
# substituters — combined with passwordless wheel sudo (sudo.nix) this is
# effectively root for the wheel group; confirm that is intended.
nix.settings.trusted-users = [ "root" "@wheel" ];
}

View File

@@ -1,7 +0,0 @@
{ pkgs, ... }:
{
  # Small base package set installed on every host.
  environment.systemPackages = [
    pkgs.nodejs_20
    pkgs.vim
  ];
}

View File

@@ -1,5 +0,0 @@
{
  # Members of the wheel group may sudo without entering a password.
  security.sudo.wheelNeedsPassword = false;
}

View File

@@ -1,14 +0,0 @@
# Tailscale on every host, auto-joining the tailnet at boot.
# (Removed a dead empty `let in` block and the unused `config` argument.)
{ pkgs, ... }:
{
  imports = [ ./tailscale_lib.nix ];
  # Module defined in tailscale_lib.nix; note its `authkey` option has no
  # default, so each enabling host must set it somewhere — TODO confirm.
  services.tailscaleAutoconnect.enable = true;
  # Track the unstable channel's tailscale package for an up-to-date client.
  services.tailscale.package = pkgs.unstable.tailscale;
  # Keep node identity/state across impermanence root rollbacks.
  environment.persistence."/persist".directories = [
    "/var/lib/tailscale"
  ];
}

View File

@@ -1,103 +0,0 @@
# https://guekka.github.io/nixos-server-2/
# Reusable module: log this machine into Tailscale automatically at boot
# using a pre-provisioned auth key; optionally advertise or use an exit node.
{ config, lib, pkgs, ... }:
with lib; let
cfg = config.services.tailscaleAutoconnect;
in {
options.services.tailscaleAutoconnect = {
enable = mkEnableOption "tailscaleAutoconnect";
# SECURITY(review): the key is interpolated into the unit script below and
# therefore ends up world-readable in /nix/store. Also, no default is
# declared, so evaluation fails unless every enabling host sets it.
authkey = mkOption {
type = types.str;
description = "The authkey to use for authentication with Tailscale";
};
# Empty string means "use the default Tailscale coordination server".
loginServer = mkOption {
type = types.str;
default = "";
description = "The login server to use for authentication with Tailscale";
};
advertiseExitNode = mkOption {
type = types.bool;
default = false;
description = "Whether to advertise this node as an exit node";
};
# Empty string means "do not route through an exit node".
exitNode = mkOption {
type = types.str;
default = "";
description = "The exit node to use for this node";
};
exitNodeAllowLanAccess = mkOption {
type = types.bool;
default = false;
description = "Whether to allow LAN access to this node";
};
};
config = mkIf cfg.enable {
# Option combinations: a node either advertises itself as an exit node or
# uses one, never both; LAN access only makes sense with an exit node set.
assertions = [
{
assertion = cfg.authkey != "";
message = "authkey must be set";
}
{
assertion = cfg.exitNodeAllowLanAccess -> cfg.exitNode != "";
message = "exitNodeAllowLanAccess must be false if exitNode is not set";
}
{
assertion = cfg.advertiseExitNode -> cfg.exitNode == "";
message = "advertiseExitNode must be false if exitNode is set";
}
];
# One-shot unit: waits for tailscaled, exits early if the backend already
# has a login, otherwise runs `tailscale up` twice — authentication first,
# then the flags that are only accepted once authenticated.
systemd.services.tailscale-autoconnect = {
description = "Automatic connection to Tailscale";
# make sure tailscale is running before trying to connect to tailscale
after = ["network-pre.target" "tailscale.service"];
wants = ["network-pre.target" "tailscale.service"];
wantedBy = ["multi-user.target"];
serviceConfig.Type = "oneshot";
script = with pkgs; ''
# wait for tailscaled to settle
sleep 2
# check if we are already authenticated to tailscale
status="$(${tailscale}/bin/tailscale status -json | ${jq}/bin/jq -r .BackendState)"
# if status is not null, then we are already authenticated
echo "tailscale status: $status"
if [ "$status" != "NeedsLogin" ]; then
exit 0
fi
# otherwise authenticate with tailscale
# timeout after 10 seconds to avoid hanging the boot process
${coreutils}/bin/timeout 10 ${tailscale}/bin/tailscale up \
${lib.optionalString (cfg.loginServer != "") "--login-server=${cfg.loginServer}"} \
"--authkey=${cfg.authkey}"
# we have to proceed in two steps because some options are only available
# after authentication
${coreutils}/bin/timeout 10 ${tailscale}/bin/tailscale up \
${lib.optionalString (cfg.loginServer != "") "--login-server=${cfg.loginServer}"} \
${lib.optionalString (cfg.advertiseExitNode) "--advertise-exit-node"} \
${lib.optionalString (cfg.exitNode != "") "--exit-node=${cfg.exitNode}"} \
${lib.optionalString (cfg.exitNodeAllowLanAccess) "--exit-node-allow-lan-access"}
'';
};
# Trust tailnet traffic and allow tailscaled's UDP port through.
networking.firewall = {
trustedInterfaces = [ "tailscale0" ];
allowedUDPPorts = [ config.services.tailscale.port ];
};
services.tailscale = {
enable = true;
# Advertising an exit node requires server-side routing features.
useRoutingFeatures = if cfg.advertiseExitNode then "server" else "client";
};
};
}

View File

@@ -1,10 +0,0 @@
# Mount the "compute" Gluster volume served by node c1.
{
fileSystems."/data/compute" = {
device = "c1:/compute";
fsType = "glusterfs";
options = [
# fall back to the other nodes if c1 cannot serve the volfile
"backup-volfile-servers=c2:c3"
# wait for the network before attempting to mount
"_netdev"
];
};
}

View File

@@ -1,13 +0,0 @@
# GlusterFS server daemon (glusterd).
# (Removed the unused `pkgs`, `config` and `lib` module arguments.)
{ ... }:
{
  services.glusterfs = {
    enable = true;
  };
  # Keep peer/volume metadata across impermanence root rollbacks.
  environment.persistence."/persist".directories = [
    "/var/lib/glusterd"
  ];
  # TODO: each volume needs its own port starting at 49152
  networking.firewall.allowedTCPPorts = [ 24007 24008 24009 49152 49153 49154 49155 ];
}

View File

@@ -1,74 +0,0 @@
# Ephemeral root on btrfs: at every boot / is reset to a blank snapshot and
# only /nix, /persist, /var/log and the paths bind-mounted from /persist
# survive.
{ pkgs, inputs, ... }:
{
imports = [
inputs.impermanence.nixosModules.impermanence
];
environment.persistence."/persist" = {
# Service modules (consul, nomad, tailscale, network, ...) append their
# own state directories; only host identity files are listed globally.
directories = [
];
files = [
"/etc/machine-id"
"/etc/ssh/ssh_host_ed25519_key"
"/etc/ssh/ssh_host_ed25519_key.pub"
"/etc/ssh/ssh_host_rsa_key"
"/etc/ssh/ssh_host_rsa_key.pub"
];
};
fileSystems."/".options = ["compress=zstd" "noatime" ];
fileSystems."/nix".options = ["compress=zstd" "noatime" ];
fileSystems."/persist".options = ["compress=zstd" "noatime" ];
# /persist must be available early so the bind mounts above can be set up.
fileSystems."/persist".neededForBoot = true;
fileSystems."/var/log".options = ["compress=zstd" "noatime" ];
fileSystems."/var/log".neededForBoot = true;
# Declarative users only — mutable state under /etc would be lost anyway.
users.mutableUsers = false;
# rollback results in sudo lectures after each reboot
security.sudo.extraConfig = ''
Defaults lecture = never
'';
# reset / at each boot
# Note `lib.mkBefore` is used instead of `lib.mkAfter` here.
# (The rollback must run before the root subvolume is mounted as /.)
boot.initrd.postDeviceCommands = pkgs.lib.mkBefore ''
mkdir -p /mnt
# We first mount the btrfs root to /mnt
# so we can manipulate btrfs subvolumes.
mount -o subvol=/ /dev/mapper/luksroot /mnt
# While we're tempted to just delete /root and create
# a new snapshot from /root-blank, /root is already
# populated at this point with a number of subvolumes,
# which makes `btrfs subvolume delete` fail.
# So, we remove them first.
#
# /root contains subvolumes:
# - /root/var/lib/portables
# - /root/var/lib/machines
#
# I suspect these are related to systemd-nspawn, but
# since I don't use it I'm not 100% sure.
# Anyhow, deleting these subvolumes hasn't resulted
# in any issues so far, except for fairly
# benign-looking errors from systemd-tmpfiles.
btrfs subvolume list -o /mnt/root |
cut -f9 -d' ' |
while read subvolume; do
echo "deleting /$subvolume subvolume..."
btrfs subvolume delete "/mnt/$subvolume"
done &&
echo "deleting /root subvolume..." &&
btrfs subvolume delete /mnt/root
echo "restoring blank /root subvolume..."
btrfs subvolume snapshot /mnt/root-blank /mnt/root
# Once we're done rolling back to a blank snapshot,
# we can unmount /mnt and continue on the boot process.
umount /mnt
'';
}

View File

@@ -1,88 +0,0 @@
# inspiration: https://github.com/astro/skyflake/blob/main/nixos-modules/nomad.nix
# Nomad agent: every node runs a client; the hosts in `servers` also run in
# server mode. Docker plugin options are injected via an /etc JSON file.
{ pkgs, config, ... }:
let
# Hosts that form the Nomad server quorum.
servers = [ "c1" "c2" "c3" ];
server_enabled = builtins.elem config.networking.hostName servers;
in
{
services.nomad = {
enable = true;
package = pkgs.unstable.nomad;
# true breaks at least CSI volumes
# TODO: consider fixing
dropPrivileges = false;
settings = {
datacenter = "alo";
client = {
enabled = true;
server_join.retry_join = servers;
# Expose the tailnet as a named host network for job port bindings.
host_network.tailscale = {
interface = "tailscale0";
cidr = "100.64.0.0/10";
};
# Host volumes that jobs may mount (both read-only).
host_volume = {
code = {
path = "/data/compute/code";
read_only = true;
};
nix-store = {
path = "/nix/store";
read_only = true;
};
};
};
server = {
enabled = server_enabled;
# Integer majority quorum: (n + 2) / 2 == floor(n/2) + 1.
bootstrap_expect = (builtins.length servers + 2) / 2;
server_join.retry_join = servers;
};
telemetry = {
collection_interval = "1s";
disable_hostname = true;
prometheus_metrics = true;
publish_allocation_metrics = true;
publish_node_metrics = true;
};
};
# Merged at agent startup; generated below via environment.etc.
extraSettingsPaths = [ "/etc/nomad-alo.json" ];
};
systemd.services.nomad.wants = [ "network-online.target" ];
environment.etc."nomad-alo.json".text = builtins.toJSON {
plugin.docker.config = {
allow_privileged = true;
# for keepalived, though only really needing "NET_ADMIN","NET_BROADCAST","NET_RAW" on top of default
# TODO: trim this down
allow_caps = ["all"];
volumes.enabled = true;
# Extra Docker labels, useful for correlating metrics/logs with jobs.
extra_labels = [
"job_name"
"task_group_name"
"task_name"
"node_name"
];
};
};
# Keep Docker images and Nomad state across impermanence rollbacks.
environment.persistence."/persist".directories = [
"/var/lib/docker"
"/var/lib/nomad"
];
# Nomad CLI plus companion tools.
environment.systemPackages = with pkgs; [
nomad
wander
damon
];
# HTTP API (4646) everywhere; RPC (4647) and serf (4648) on servers only.
networking.firewall = {
allowedTCPPorts = if server_enabled then [ 4646 4647 4648 ] else [ 4646 ];
allowedUDPPorts = if server_enabled then [ 4648 ] else [];
};
}

View File

@@ -1,14 +0,0 @@
{
  # OpenSSH with key-only authentication; the SFTP subsystem stays enabled.
  services.openssh.enable = true;
  services.openssh.allowSFTP = true;
  services.openssh.settings.PasswordAuthentication = false;
  services.openssh.settings.KbdInteractiveAuthentication = false;
  # Open the standard SSH port.
  networking.firewall.allowedTCPPorts = [ 22 ];
}

View File

@@ -1,5 +0,0 @@
{
  # Boot via systemd-boot on EFI; NixOS may manage the EFI variables.
  boot.loader = {
    systemd-boot.enable = true;
    efi.canTouchEfiVariables = true;
  };
}

View File

@@ -1,9 +0,0 @@
# Unattended LUKS unlock: the key is the first 4096 bytes of a raw device
# instead of a passphrase typed at the console.
{
# make USB mass-storage devices visible in the initrd
boot.initrd.kernelModules = [ "usb_storage" ];
boot.initrd.luks.devices."luksroot" = {
# pass TRIM through the encryption layer
allowDiscards = true;
bypassWorkqueues = true;
keyFileSize = 4096;
# NOTE(review): assumes the key device always enumerates as /dev/sda —
# confirm ordering is stable, or address it via /dev/disk/by-id instead.
keyFile = "/dev/sda";
};
}

View File

@@ -1,12 +0,0 @@
# Profile for user-facing nodes: storage clients, Consul and SSH, without
# the Nomad/Gluster server pieces of the compute-node profile.
# NOTE(review): `pkgs` is not used in this module — could be dropped.
{ pkgs, inputs, ... }:
{
imports = [
# not used for this profile but defines options used by the other imports
inputs.impermanence.nixosModules.impermanence
./cifs-client.nix
./consul.nix
./glusterfs-client.nix
./sshd.nix
./user-ppetru.nix
];
}

View File

@@ -1,8 +0,0 @@
{
# Admin account; wheel membership grants sudo (passwordless per sudo.nix)
# and nix trusted-user rights (per nix.nix). Login is by SSH key only —
# no password is declared and users.mutableUsers is false on compute nodes.
users.users.ppetru = {
isNormalUser = true;
extraGroups = [ "wheel" ];
openssh.authorizedKeys.keys = [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCdZ9dHN+DamoyRAIS8v7Ph85KyJ9zYdgwoqkp7F+smEJEdDKboHE5LA49IDQk4cgkR5xNEMtxANpJm+AXNAhQOPVl/w57vI/Z+TBtSvDoj8LuAvKjmmrPfok2iyD2IIlbctcw8ypn1revZwDb1rBFefpbbZdr5h+75tVqqmNebzxk6UQsfL++lU8HscWwYKzxrrom5aJL6wxNTfy7/Htkt4FHzoKAc5gcB2KM/q0s6NvZzX9WtdHHwAR1kib2EekssjDM9VLecX75Xhtbp+LrHOJKRnxbIanXos4UZUzaJctdNTcOYzEVLvV0BCYaktbI+uVvJcC0qo28bXbHdS3rTGRu8CsykFneJXnrrRIJw7mYWhJSTV9bf+6j/lnFNAurbiYmd4SzaTgbGjj2j38Gr/CTsyv8Rho7P3QUWbRRZnn4a7eVPtjGagqwIwS59YDxRcOy2Wdsw35ry/N2G802V7Cr3hUqeaAIev2adtn4FaG72C8enacYUeACPEhi7TYdsDzuuyt31W7AQa5Te4Uda20rTa0Y9N5Lw85uGB2ebbdYWlO2CqI/m+xNYcPkKqL7zZILz782jDw1sxWd/RUbEgJNrWjsKZ7ybiEMmhpw5vLiMGOeqQWIT6cBCNjocmW0ocU+FBLhhioyrvuZOyacoEZLoklatsL0DMkvvkbT0Ew== petru@paler.net" ];
};
}

View File

@@ -1,8 +1,8 @@
{ pkgs, inputs, ... }:
{
imports = [
../common/global
../common/user-node.nix
../../common/global
../../common/user-node.nix
./hardware.nix
];