Compare commits
219 Commits
da0b60c2e1
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| 7fd79c9911 | |||
| 41eacfec02 | |||
| 0a0748b920 | |||
| d6e0e09e87 | |||
| 61c3020a5e | |||
| 972b973f58 | |||
| 8c5a7b78c6 | |||
| 675204816a | |||
| 3bb82dbc6b | |||
| 0f6233c3ec | |||
| 43fa56bf35 | |||
| 50c930eeaf | |||
| 8dde15b8ef | |||
| 6100d8dc69 | |||
| a92f0fcb28 | |||
| bd4604cdcc | |||
| 31db372b43 | |||
| 360e776745 | |||
| 5a819f70bb | |||
| b2c055ffb2 | |||
| 6e0b34843b | |||
| e8485e3bb7 | |||
| e8cd970960 | |||
| 78b59cec4f | |||
| e6d40a9f7e | |||
| 7733a1be46 | |||
| a5df98bc5a | |||
| fb9b0dd2f5 | |||
| 0dc214069c | |||
| a6c4be9530 | |||
| 6e338e6d65 | |||
| 41f16fa0b8 | |||
| 1b05728817 | |||
| 520a417316 | |||
| 88ed5360ca | |||
| 392d40def3 | |||
| 5ef4d832fb | |||
| 49afc0c084 | |||
| b2c82ceaa8 | |||
| b9286d7243 | |||
| 22931e6747 | |||
| ac030018c6 | |||
| 7386d3a5ee | |||
| 2a5a9f2ee9 | |||
| 963a7c10fa | |||
| 283cf9d614 | |||
| 5b3b4ea2ed | |||
| 5a9d5de5c4 | |||
| a5e3f613c2 | |||
| 8b8fac2d89 | |||
| 31d79ba75b | |||
| 6faf148fde | |||
| e88f1c93c5 | |||
| 51375db1e4 | |||
| 9415a8ece2 | |||
| da85ee776d | |||
| e23dc7df5b | |||
| 163b9e4c22 | |||
| d521c3b013 | |||
| d123400ea9 | |||
| 9c64a8ec00 | |||
| 4907238726 | |||
| 37aad7d951 | |||
| ac34f029ed | |||
| 8d04add7dc | |||
| d7a07cebf5 | |||
| 2ba961bfa8 | |||
| 765e92f9c7 | |||
| 1bb202d017 | |||
| 98769f59d6 | |||
| 762037d17f | |||
| 32a22c783d | |||
| 8c29c18287 | |||
| 092a8b3658 | |||
| c7ff79d0c3 | |||
| ac51f50ef5 | |||
| c5347b6eba | |||
| d4525313bb | |||
| 92a27ac92b | |||
| fabfeea1c2 | |||
| 5ce0e0e1df | |||
| bd473d1ad2 | |||
| 064d227344 | |||
| dd8fee0ecb | |||
| a2b54be875 | |||
| ccf6154ba0 | |||
| bd5988dfbc | |||
| a57fc9107b | |||
| a7dce7cfb9 | |||
| b608e110c9 | |||
| 78dee346e9 | |||
| 66f26842c9 | |||
| 9c504e0278 | |||
| 4035d38ab2 | |||
| 53ef2f6293 | |||
| e5cd9bd98e | |||
| 0b51b44856 | |||
| f918ff5df2 | |||
| 4921679140 | |||
| ce7b3bbe16 | |||
| cf2210ec77 | |||
| 1dc219d08f | |||
| b7ef5f89b7 | |||
| 974d10cbe2 | |||
| efb677fd00 | |||
| 7eb11d7573 | |||
| 53ecddb7aa | |||
| 94f71cc62e | |||
| 58bb710cb9 | |||
| 854f663fb0 | |||
| 376b3cd7e4 | |||
| 5d0880a789 | |||
| 09603daf80 | |||
| bbd072abf2 | |||
| 75c60b29e8 | |||
| ef22227ca8 | |||
| 8100aa7070 | |||
| fe2c866115 | |||
| 35f68fb6e8 | |||
| f8aee0d438 | |||
| 2437d46aa9 | |||
| d16ffd9c65 | |||
| 49f159e2a6 | |||
| 17c0f2db2a | |||
| c80a2c9a58 | |||
| 706f46ae77 | |||
| fa603e8aea | |||
| 8032ad4d20 | |||
| 8ce5194ca9 | |||
| a948f26ffb | |||
| f414ac0146 | |||
| 17711da0b6 | |||
| ed06f07116 | |||
| bffc09cbd6 | |||
| f488b710bf | |||
| 65835e1ed0 | |||
| 967ff34a51 | |||
| 1262e03e21 | |||
| a949446d83 | |||
| 99db96e449 | |||
| fe51f1ac5b | |||
| a1089a7cac | |||
| 4d6d2b4d6f | |||
| 9d5a7994eb | |||
| 1465213c90 | |||
| bd15987f8d | |||
| 438d9a44d4 | |||
| 19ba8e3286 | |||
| 0b17a32da5 | |||
| 7cecf5bea6 | |||
| 28887b671a | |||
| e23a791e61 | |||
| 8fde9b0e7c | |||
| c7a53a66a9 | |||
| cc040ed876 | |||
| 0b0ba486a5 | |||
| f0fcea7645 | |||
| d4d8370682 | |||
| 976f3f53c4 | |||
| 46a896cad4 | |||
| 5391385a68 | |||
| e37b64036c | |||
| e26e481152 | |||
| b9793835d4 | |||
| 64e9059a77 | |||
| a3b85f0088 | |||
| 5cd32a1d93 | |||
| dc2c1ecb00 | |||
| fc3cefd1f0 | |||
| 7daf285973 | |||
| 40ae35b255 | |||
| dd186e0ebe | |||
| 73ecc06845 | |||
| 567ed698fb | |||
| f5b5ec9615 | |||
| 38db0f7207 | |||
| 22921200a7 | |||
| d8ca3c27e2 | |||
| 77299dd07a | |||
| b5339141df | |||
| 33bc772960 | |||
| a7aa7e1946 | |||
| 0e9e8c8bed | |||
| 05318c6255 | |||
| aec74345d4 | |||
| 24ab04b098 | |||
| 68a3339794 | |||
| 9ef1cafc32 | |||
| e4ca52b587 | |||
| c5466d559d | |||
| c554069116 | |||
| 5cf9a110e8 | |||
| 61b0edb305 | |||
| 1ca167d135 | |||
| 0098b66de3 | |||
| 11bf328239 | |||
| 2a7447088e | |||
| 2e84537a3f | |||
| e11cfdb1f8 | |||
| d8d73ed2d2 | |||
| 8a56607163 | |||
| 9b9f03fc20 | |||
| 0dbf41d54c | |||
| bded37656a | |||
| d579d0b86b | |||
| c20c620198 | |||
| 046b6819fd | |||
| 27787f3a17 | |||
| cd1f38229a | |||
| 78b6a59160 | |||
| 5d744f394a | |||
| 33b1981146 | |||
| 13c222f783 | |||
| ad5cf2d44e | |||
| 0c84c7fe4f | |||
| a774bb6e3b | |||
| f3f73a16aa | |||
| e140055ef3 | |||
| 5367582155 |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,3 +1,6 @@
|
|||||||
*.swp
|
*.swp
|
||||||
.tmp
|
.tmp
|
||||||
result
|
result
|
||||||
|
.aider*
|
||||||
|
.claude
|
||||||
|
.direnv/
|
||||||
|
|||||||
28
.sops.yaml
28
.sops.yaml
@@ -2,9 +2,12 @@ keys:
|
|||||||
- &admin_ppetru age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
- &admin_ppetru age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||||
- &server_zippy age1gtyw202hd07hddac9886as2cs8pm07e4exlnrgfm72lync75ng9qc5fjac
|
- &server_zippy age1gtyw202hd07hddac9886as2cs8pm07e4exlnrgfm72lync75ng9qc5fjac
|
||||||
- &server_chilly age16yqffw4yl5jqvsr7tyd883vn98zw0attuv9g5snc329juff6dy3qw2w5wp
|
- &server_chilly age16yqffw4yl5jqvsr7tyd883vn98zw0attuv9g5snc329juff6dy3qw2w5wp
|
||||||
|
- &server_sparky age14aml5s3sxksa8qthnt6apl3pu6egxyn0cz7pdzzvp2yl6wncad0q56udyj
|
||||||
|
- &server_stinky age1me78u46409q9ez6fj0qanrfffc5e9kuq7n7uuvlljfwwc2mdaezqmyzxhx
|
||||||
|
- &server_beefy age1cs8uqj243lspyp042ueu5aes4t3azgyuaxl9au70ggrl2meulq4sgqpc7y
|
||||||
- &server_alo_cloud_1 age1w5w4wfvtul3sge9mt205zvrkjaeh3qs9gsxhmq7df2g4dztnvv6qylup8z
|
- &server_alo_cloud_1 age1w5w4wfvtul3sge9mt205zvrkjaeh3qs9gsxhmq7df2g4dztnvv6qylup8z
|
||||||
- &server_c1 age1wwufz86tm3auxn6pn27c47s8rvu7en58rk00nghtaxsdpw0gya6qj6qxdt
|
- &server_c1 age1wwufz86tm3auxn6pn27c47s8rvu7en58rk00nghtaxsdpw0gya6qj6qxdt
|
||||||
- &server_c2 age1gekmz8kc8r2lc2x6d4u63s2lnpmres4hu9wulxh29ch74ud7wfksq56xam
|
- &server_c2 age1jy7pe4530s8w904wtvrmpxvteztqy5ewdt92a7y3lq87sg9jce5qxxuydt
|
||||||
- &server_c3 age1zjgqu3zks5kvlw6hvy6ytyygq7n25lu0uj2435zlf30smpxuy4hshpmfer
|
- &server_c3 age1zjgqu3zks5kvlw6hvy6ytyygq7n25lu0uj2435zlf30smpxuy4hshpmfer
|
||||||
creation_rules:
|
creation_rules:
|
||||||
- path_regex: secrets/common\.yaml
|
- path_regex: secrets/common\.yaml
|
||||||
@@ -13,6 +16,9 @@ creation_rules:
|
|||||||
- *admin_ppetru
|
- *admin_ppetru
|
||||||
- *server_zippy
|
- *server_zippy
|
||||||
- *server_chilly
|
- *server_chilly
|
||||||
|
- *server_sparky
|
||||||
|
- *server_stinky
|
||||||
|
- *server_beefy
|
||||||
- *server_alo_cloud_1
|
- *server_alo_cloud_1
|
||||||
- *server_c1
|
- *server_c1
|
||||||
- *server_c2
|
- *server_c2
|
||||||
@@ -27,6 +33,26 @@ creation_rules:
|
|||||||
- age:
|
- age:
|
||||||
- *admin_ppetru
|
- *admin_ppetru
|
||||||
- *server_chilly
|
- *server_chilly
|
||||||
|
- path_regex: secrets/sparky\.yaml
|
||||||
|
key_groups:
|
||||||
|
- age:
|
||||||
|
- *admin_ppetru
|
||||||
|
- *server_sparky
|
||||||
|
- path_regex: secrets/stinky\.yaml
|
||||||
|
key_groups:
|
||||||
|
- age:
|
||||||
|
- *admin_ppetru
|
||||||
|
- *server_stinky
|
||||||
|
- path_regex: secrets/beefy\.yaml
|
||||||
|
key_groups:
|
||||||
|
- age:
|
||||||
|
- *admin_ppetru
|
||||||
|
- *server_beefy
|
||||||
|
- path_regex: secrets/wifi\.yaml
|
||||||
|
key_groups:
|
||||||
|
- age:
|
||||||
|
- *admin_ppetru
|
||||||
|
- *server_stinky
|
||||||
- path_regex: secrets/alo-cloud-1\.yaml
|
- path_regex: secrets/alo-cloud-1\.yaml
|
||||||
key_groups:
|
key_groups:
|
||||||
- age:
|
- age:
|
||||||
|
|||||||
116
CLAUDE.md
Normal file
116
CLAUDE.md
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
# Claude Code Quick Reference
|
||||||
|
|
||||||
|
NixOS cluster configuration using flakes. Homelab infrastructure with Nomad/Consul orchestration.
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
├── common/
|
||||||
|
│ ├── global/ # Applied to all hosts (backup, sops, users, etc.)
|
||||||
|
│ ├── minimal-node.nix # Base (ssh, user, boot, impermanence)
|
||||||
|
│ ├── cluster-member.nix # Consul agent + storage mounts (NFS/CIFS)
|
||||||
|
│ ├── nomad-worker.nix # Nomad client (runs jobs) + Docker + NFS deps
|
||||||
|
│ ├── nomad-server.nix # Enables Consul + Nomad server mode
|
||||||
|
│ ├── cluster-tools.nix # Just CLI tools (nomad, wander, damon)
|
||||||
|
│ ├── workstation-node.nix # Dev tools (wget, deploy-rs, docker, nix-ld)
|
||||||
|
│ ├── desktop-node.nix # Hyprland + GUI environment
|
||||||
|
│ ├── nfs-services-server.nix # NFS server + btrfs replication
|
||||||
|
│ └── nfs-services-standby.nix # NFS standby + receive replication
|
||||||
|
├── hosts/ # Host configs - check imports for roles
|
||||||
|
├── docs/
|
||||||
|
│ ├── CLUSTER_REVAMP.md # Master plan for architecture changes
|
||||||
|
│ ├── MIGRATION_TODO.md # Tracking checklist for migration
|
||||||
|
│ ├── NFS_FAILOVER.md # NFS failover procedures
|
||||||
|
│ └── AUTH_SETUP.md # Authentication (Pocket ID + Traefik OIDC)
|
||||||
|
└── services/ # Nomad job specs (.hcl files)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Current Architecture
|
||||||
|
|
||||||
|
### Storage Mounts
|
||||||
|
- `/data/services` - NFS from `data-services.service.consul` (check nfs-services-server.nix for primary)
|
||||||
|
- `/data/media` - CIFS from fractal
|
||||||
|
- `/data/shared` - CIFS from fractal
|
||||||
|
|
||||||
|
### Cluster Roles (check hosts/*/default.nix for each host's imports)
|
||||||
|
- **Quorum**: hosts importing `nomad-server.nix` (3 expected for consensus)
|
||||||
|
- **Workers**: hosts importing `nomad-worker.nix` (run Nomad jobs)
|
||||||
|
- **NFS server**: host importing `nfs-services-server.nix` (affinity for direct disk access like DBs)
|
||||||
|
- **Standby**: hosts importing `nfs-services-standby.nix` (receive replication)
|
||||||
|
|
||||||
|
## Config Architecture
|
||||||
|
|
||||||
|
**Modular role-based configs** (compose as needed):
|
||||||
|
- `minimal-node.nix` - Base for all systems (SSH, user, boot, impermanence)
|
||||||
|
- `cluster-member.nix` - Consul agent + shared storage mounts (no Nomad)
|
||||||
|
- `nomad-worker.nix` - Nomad client to run jobs (requires cluster-member)
|
||||||
|
- `nomad-server.nix` - Enables Consul + Nomad server mode (for quorum members)
|
||||||
|
- `cluster-tools.nix` - Just CLI tools (no services)
|
||||||
|
|
||||||
|
**Machine type configs** (via flake profile):
|
||||||
|
- `workstation-node.nix` - Dev tools (deploy-rs, docker, nix-ld, emulation)
|
||||||
|
- `desktop-node.nix` - Extends workstation + Hyprland/GUI
|
||||||
|
|
||||||
|
**Composition patterns**:
|
||||||
|
- Quorum member: `cluster-member + nomad-worker + nomad-server`
|
||||||
|
- Worker only: `cluster-member + nomad-worker`
|
||||||
|
- CLI only: `cluster-member + cluster-tools` (Consul agent, no Nomad service)
|
||||||
|
- NFS primary: `cluster-member + nomad-worker + nfs-services-server`
|
||||||
|
- Standalone: `minimal-node` only (no cluster membership)
|
||||||
|
|
||||||
|
**Key insight**: Profiles (workstation/desktop) don't imply cluster roles. Check imports for actual roles.
|
||||||
|
|
||||||
|
## Key Patterns
|
||||||
|
|
||||||
|
**NFS Server/Standby**:
|
||||||
|
- Primary: imports `nfs-services-server.nix`, sets `standbys = [...]`
|
||||||
|
- Standby: imports `nfs-services-standby.nix`, sets `replicationKeys = [...]`
|
||||||
|
- Replication: btrfs send/receive every 5min, incremental with fallback to full
|
||||||
|
- Check host configs for current primary/standby assignments
|
||||||
|
|
||||||
|
**Backups**:
|
||||||
|
- Kopia client on all nodes → Kopia server on fractal
|
||||||
|
- Backs up `/persist` hourly via btrfs snapshot
|
||||||
|
- Excludes: `services@*` and `services-standby/services@*` (replication snapshots)
|
||||||
|
|
||||||
|
**Secrets**:
|
||||||
|
- SOPS for secrets, files in `secrets/`
|
||||||
|
- Keys managed per-host
|
||||||
|
|
||||||
|
**Authentication**:
|
||||||
|
- Pocket ID (OIDC provider) at `pocket-id.v.paler.net`
|
||||||
|
- Traefik uses `traefik-oidc-auth` plugin for SSO
|
||||||
|
- Services add `middlewares=oidc-auth@file` tag to protect
|
||||||
|
- See `docs/AUTH_SETUP.md` for details
|
||||||
|
|
||||||
|
## Migration Status
|
||||||
|
|
||||||
|
**Phase 3 & 4**: COMPLETE! GlusterFS removed, all services on NFS
|
||||||
|
**Next**: Convert fractal to NixOS (deferred)
|
||||||
|
|
||||||
|
See `docs/MIGRATION_TODO.md` for detailed checklist.
|
||||||
|
|
||||||
|
## Common Tasks
|
||||||
|
|
||||||
|
**Deploy a host**: `deploy -s '.#hostname'`
|
||||||
|
**Deploy all**: `deploy`
|
||||||
|
**Check replication**: Check NFS primary host, then `ssh <primary> journalctl -u replicate-services-to-*.service -f`
|
||||||
|
**NFS failover**: See `docs/NFS_FAILOVER.md`
|
||||||
|
**Nomad jobs**: `services/*.hcl` - service data stored at `/data/services/<service-name>`
|
||||||
|
|
||||||
|
## Troubleshooting Hints
|
||||||
|
|
||||||
|
- Replication errors with "empty stream": SSH key restricted to `btrfs receive`, can't run other commands
|
||||||
|
- NFS split-brain protection: nfs-server checks Consul before starting
|
||||||
|
- Btrfs snapshots: nested snapshots appear as empty dirs in parent snapshots
|
||||||
|
- Kopia: uses temporary snapshot for consistency, doesn't back up nested subvolumes
|
||||||
|
|
||||||
|
## Important Files
|
||||||
|
|
||||||
|
- `common/global/backup.nix` - Kopia backup configuration
|
||||||
|
- `common/nfs-services-server.nix` - NFS server role (check hosts for which imports this)
|
||||||
|
- `common/nfs-services-standby.nix` - NFS standby role (check hosts for which imports this)
|
||||||
|
- `flake.nix` - Host definitions, nixpkgs inputs
|
||||||
|
|
||||||
|
---
|
||||||
|
*Auto-generated reference for Claude Code. Keep concise. Update when architecture changes.*
|
||||||
196
README.md
Normal file
196
README.md
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
# alo-cluster NixOS Configuration
|
||||||
|
|
||||||
|
This repository contains the NixOS configuration for a distributed cluster of machines managed as a unified flake.
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
The configuration uses a **layered profile system** that enables code reuse while maintaining clear separation of concerns:
|
||||||
|
|
||||||
|
```
|
||||||
|
minimal-node # Base system (SSH, users, boot, impermanence)
|
||||||
|
↓
|
||||||
|
cluster-node # Cluster services (Consul, GlusterFS, CIFS, encryption)
|
||||||
|
↓
|
||||||
|
server-node # Server workloads (future: MySQL, PostgreSQL)
|
||||||
|
↓
|
||||||
|
workstation-node # Development tools (Docker, deploy-rs, emulation)
|
||||||
|
↓
|
||||||
|
desktop-node # GUI environment (Hyprland, Pipewire, fonts)
|
||||||
|
```
|
||||||
|
|
||||||
|
Each layer extends the previous one, inheriting all configurations. Hosts select a profile level that matches their role.
|
||||||
|
|
||||||
|
### Special Node Types
|
||||||
|
|
||||||
|
- **compute-node**: Cluster + Nomad worker (container orchestration)
|
||||||
|
|
||||||
|
## Directory Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
.
|
||||||
|
├── flake.nix # Main flake definition with all hosts
|
||||||
|
├── common/
|
||||||
|
│ ├── global/ # Global configs applied to all systems
|
||||||
|
│ │ ├── console.nix # Linux console colors (Solarized Dark)
|
||||||
|
│ │ ├── locale.nix # Timezone and locale settings
|
||||||
|
│ │ └── nix.nix # Nix daemon and flake configuration
|
||||||
|
│ ├── minimal-node.nix # Base layer: SSH, users, boot, impermanence
|
||||||
|
│ ├── cluster-node.nix # Cluster layer: Consul, GlusterFS, CIFS
|
||||||
|
│ ├── server-node.nix # Server layer: bare metal services (future)
|
||||||
|
│ ├── workstation-node.nix # Workstation layer: dev tools
|
||||||
|
│ ├── desktop-node.nix # Desktop layer: GUI environment
|
||||||
|
│ ├── compute-node.nix # Nomad worker profile
|
||||||
|
│ └── [feature modules] # Individual feature configs
|
||||||
|
├── hosts/
|
||||||
|
│ ├── c1/ # Compute node 1
|
||||||
|
│ ├── c2/ # Compute node 2
|
||||||
|
│ ├── c3/ # Compute node 3
|
||||||
|
│ ├── alo-cloud-1/ # Cloud VPS
|
||||||
|
│ ├── chilly/ # Server node
|
||||||
|
│ ├── zippy/ # Workstation node
|
||||||
|
│ └── sparky/ # Desktop node
|
||||||
|
├── home/
|
||||||
|
│ ├── default.nix # Home-manager entry point
|
||||||
|
│ ├── profiles/ # Per-profile package sets
|
||||||
|
│ │ ├── server.nix
|
||||||
|
│ │ ├── workstation.nix
|
||||||
|
│ │ └── desktop.nix
|
||||||
|
│ ├── programs/ # Per-profile program configurations
|
||||||
|
│ │ ├── server.nix # CLI tools (fish, tmux, git, nixvim)
|
||||||
|
│ │ ├── workstation.nix # + dev tools
|
||||||
|
│ │ └── desktop.nix # + Hyprland, wofi
|
||||||
|
│ └── common/ # Shared home-manager configs
|
||||||
|
└── services/ # Nomad job definitions (not NixOS)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Profile System
|
||||||
|
|
||||||
|
### System Profiles
|
||||||
|
|
||||||
|
Profiles are automatically applied based on the `mkHost` call in `flake.nix`:
|
||||||
|
|
||||||
|
```nix
|
||||||
|
# Example: Desktop profile includes all layers up to desktop-node
|
||||||
|
mkHost "x86_64-linux" "desktop" [
|
||||||
|
./hosts/sparky
|
||||||
|
];
|
||||||
|
```
|
||||||
|
|
||||||
|
**Available profiles:**
|
||||||
|
- `"server"` → minimal + cluster + server
|
||||||
|
- `"workstation"` → minimal + cluster + server + workstation
|
||||||
|
- `"desktop"` → minimal + cluster + server + workstation + desktop
|
||||||
|
|
||||||
|
### Home-Manager Profiles
|
||||||
|
|
||||||
|
Home-manager automatically inherits the same profile as the system, configured in `home/default.nix`:
|
||||||
|
|
||||||
|
```nix
|
||||||
|
imports = [ ./programs/${profile}.nix ];
|
||||||
|
home.packages = profilePkgs.${profile};
|
||||||
|
```
|
||||||
|
|
||||||
|
This ensures system and user configurations stay synchronized.
|
||||||
|
|
||||||
|
## Host Definitions
|
||||||
|
|
||||||
|
### Current Hosts
|
||||||
|
|
||||||
|
| Host | Profile | Role | Hardware |
|
||||||
|
|------|---------|------|----------|
|
||||||
|
| **c1, c2, c3** | compute-node | Nomad workers | Bare metal servers |
|
||||||
|
| **alo-cloud-1** | minimal | Reverse proxy (Traefik) | Cloud VPS |
|
||||||
|
| **chilly** | server | Home Assistant in a VM | Bare metal server |
|
||||||
|
| **zippy** | workstation | Development machine, server | Bare metal server |
|
||||||
|
| **sparky** | desktop | Desktop environment | Bare metal desktop |
|
||||||
|
|
||||||
|
### Adding a New Host
|
||||||
|
|
||||||
|
1. Create host directory:
|
||||||
|
```bash
|
||||||
|
mkdir -p hosts/newhost
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create `hosts/newhost/default.nix`:
|
||||||
|
```nix
|
||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../../common/encrypted-btrfs-layout.nix # or your layout
|
||||||
|
../../common/global
|
||||||
|
./hardware.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
networking.hostName = "newhost";
|
||||||
|
# Host-specific configs here
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Generate hardware config:
|
||||||
|
```bash
|
||||||
|
nixos-generate-config --show-hardware-config > hosts/newhost/hardware.nix
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Add to `flake.nix`:
|
||||||
|
```nix
|
||||||
|
newhost = mkHost "x86_64-linux" "workstation" [
|
||||||
|
./hosts/newhost
|
||||||
|
];
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deployment
|
||||||
|
|
||||||
|
### Using deploy-rs
|
||||||
|
|
||||||
|
Deploy to specific host:
|
||||||
|
```bash
|
||||||
|
deploy -s '.#sparky'
|
||||||
|
```
|
||||||
|
|
||||||
|
Deploy to all hosts:
|
||||||
|
```bash
|
||||||
|
deploy
|
||||||
|
```
|
||||||
|
|
||||||
|
Deploy with detailed logging:
|
||||||
|
```bash
|
||||||
|
deploy -s '.#sparky' -- --show-trace
|
||||||
|
```
|
||||||
|
|
||||||
|
### Manual Deployment
|
||||||
|
|
||||||
|
```bash
|
||||||
|
nixos-rebuild switch --flake .#sparky --target-host sparky
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Features
|
||||||
|
|
||||||
|
### Impermanence
|
||||||
|
All hosts use tmpfs root with selective persistence. Persistent paths configured per-host in `persistence.directories` and `persistence.files`.
|
||||||
|
|
||||||
|
### Unattended Encryption
|
||||||
|
Cluster nodes support automatic unlocking via Tailscale network using `common/unattended-encryption.nix`.
|
||||||
|
|
||||||
|
### Cluster Services
|
||||||
|
- **Consul**: Service discovery and distributed KV store
|
||||||
|
- **GlusterFS**: Distributed filesystem client
|
||||||
|
- **CIFS/Samba**: Network file sharing
|
||||||
|
|
||||||
|
### Desktop Environment (sparky only)
|
||||||
|
- **Hyprland**: Wayland compositor with CapsLock→Super remapping
|
||||||
|
- **wofi**: Application launcher (Super+D)
|
||||||
|
- **foot**: Terminal emulator (Super+Q)
|
||||||
|
- **greetd/tuigreet**: Login manager with console option
|
||||||
|
|
||||||
|
### Development Tools (workstation/desktop)
|
||||||
|
- Docker with rootless mode
|
||||||
|
- deploy-rs for NixOS deployments
|
||||||
|
- ARM emulation via binfmt
|
||||||
|
- Full NixVim configuration
|
||||||
|
|
||||||
|
## Future Work
|
||||||
|
|
||||||
|
- Migrate Nomad services (MySQL, PostgreSQL) to bare NixOS services under `server-node.nix`
|
||||||
|
- Add monitoring stack (Prometheus, Grafana)
|
||||||
|
- Document Tailscale key rotation process
|
||||||
|
- Add automated testing for configuration changes
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
{ pkgs, ... }:
|
|
||||||
{
|
|
||||||
imports = [
|
|
||||||
./cifs-client.nix
|
|
||||||
./consul.nix
|
|
||||||
./glusterfs-client.nix
|
|
||||||
./impermanence.nix
|
|
||||||
./sshd.nix
|
|
||||||
./user-ppetru.nix
|
|
||||||
./unattended-encryption.nix
|
|
||||||
./systemd-boot.nix
|
|
||||||
];
|
|
||||||
}
|
|
||||||
41
common/binary-cache-server.nix
Normal file
41
common/binary-cache-server.nix
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
{ config, pkgs, lib, ... }:
|
||||||
|
{
|
||||||
|
# Binary cache proxy using ncps (Nix Cache Proxy Server)
|
||||||
|
# Transparently caches packages from cache.nixos.org for faster LAN access
|
||||||
|
#
|
||||||
|
# How it works:
|
||||||
|
# - Acts as HTTP proxy for cache.nixos.org
|
||||||
|
# - Caches packages on first request
|
||||||
|
# - Subsequent requests served from local disk (LAN speed)
|
||||||
|
# - No signing needed (packages already signed by upstream)
|
||||||
|
# - Automatic fallback to cache.nixos.org if this host is down
|
||||||
|
#
|
||||||
|
# Setup:
|
||||||
|
# 1. Deploy this host
|
||||||
|
# 2. Deploy all other hosts (they're already configured to use this)
|
||||||
|
# 3. Cache warms up automatically on first use
|
||||||
|
|
||||||
|
services.ncps = {
|
||||||
|
enable = true;
|
||||||
|
cache = {
|
||||||
|
hostName = config.networking.hostName;
|
||||||
|
# NOTE: These paths are hardcoded to /persist (not using config.custom.impermanence.persistPath)
|
||||||
|
# This is acceptable since this service is only enabled on btrfs-based hosts
|
||||||
|
dataPath = "/persist/ncps/data";
|
||||||
|
tempPath = "/persist/ncps/tmp";
|
||||||
|
databaseURL = "sqlite:/persist/ncps/db/db.sqlite";
|
||||||
|
maxSize = "300G"; # Adjust based on available disk space
|
||||||
|
lru.schedule = "0 3 * * *"; # Clean up daily at 3 AM if over maxSize
|
||||||
|
};
|
||||||
|
server.addr = "0.0.0.0:8501";
|
||||||
|
upstream = {
|
||||||
|
caches = [ "https://cache.nixos.org" ];
|
||||||
|
publicKeys = [
|
||||||
|
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# Open firewall for LAN access
|
||||||
|
networking.firewall.allowedTCPPorts = [ 8501 ];
|
||||||
|
}
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
{ pkgs, ... }:
|
{ pkgs, ... }:
|
||||||
let
|
let
|
||||||
# this line prevents hanging on network split
|
# this line prevents hanging on network split
|
||||||
automount_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s";
|
automount_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.mount-timeout=5s,nobrl";
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
environment.systemPackages = [ pkgs.cifs-utils ];
|
environment.systemPackages = [ pkgs.cifs-utils ];
|
||||||
@@ -17,12 +17,12 @@ in
|
|||||||
fileSystems."/data/media" = {
|
fileSystems."/data/media" = {
|
||||||
device = "//fractal/media";
|
device = "//fractal/media";
|
||||||
fsType = "cifs";
|
fsType = "cifs";
|
||||||
options = [ "${automount_opts},credentials=/etc/nixos/smb-secrets" ];
|
options = [ "uid=1000,${automount_opts},credentials=/etc/nixos/smb-secrets" ];
|
||||||
};
|
};
|
||||||
|
|
||||||
fileSystems."/data/shared" = {
|
fileSystems."/data/shared" = {
|
||||||
device = "//fractal/shared";
|
device = "//fractal/shared";
|
||||||
fsType = "cifs";
|
fsType = "cifs";
|
||||||
options = [ "${automount_opts},credentials=/etc/nixos/smb-secrets" ];
|
options = [ "uid=1000,${automount_opts},credentials=/etc/nixos/smb-secrets" ];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,10 +0,0 @@
|
|||||||
{ pkgs, ... }:
|
|
||||||
{
|
|
||||||
imports = [
|
|
||||||
./consul.nix
|
|
||||||
./impermanence.nix
|
|
||||||
./sshd.nix
|
|
||||||
./user-ppetru.nix
|
|
||||||
./systemd-boot.nix
|
|
||||||
];
|
|
||||||
}
|
|
||||||
24
common/cluster-member.nix
Normal file
24
common/cluster-member.nix
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{ pkgs, lib, config, ... }:
|
||||||
|
{
|
||||||
|
# Cluster node configuration
|
||||||
|
# Extends minimal-node with cluster-specific services (Consul, GlusterFS, CIFS, NFS)
|
||||||
|
# Used by: compute nodes (c1, c2, c3)
|
||||||
|
imports = [
|
||||||
|
./minimal-node.nix
|
||||||
|
./unattended-encryption.nix
|
||||||
|
./cifs-client.nix
|
||||||
|
./consul.nix
|
||||||
|
./nfs-services-client.nix # New: NFS client for /data/services
|
||||||
|
];
|
||||||
|
|
||||||
|
options.networking.cluster.primaryInterface = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "eno1";
|
||||||
|
description = "Primary network interface for cluster communication (Consul, NFS, etc.)";
|
||||||
|
};
|
||||||
|
|
||||||
|
config = {
|
||||||
|
# Wait for primary interface to be routable before considering network online
|
||||||
|
systemd.network.wait-online.extraArgs = [ "--interface=${config.networking.cluster.primaryInterface}:routable" ];
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
{ pkgs, ... }:
|
|
||||||
{
|
|
||||||
imports = [
|
|
||||||
./base-node.nix
|
|
||||||
./glusterfs.nix
|
|
||||||
./nomad.nix
|
|
||||||
./syncthing-data.nix
|
|
||||||
];
|
|
||||||
}
|
|
||||||
@@ -1,44 +1,47 @@
|
|||||||
{ pkgs, config, ... }:
|
{ pkgs, config, lib, ... }:
|
||||||
let
|
let
|
||||||
servers = [
|
servers = [
|
||||||
"c1"
|
"c1"
|
||||||
"c2"
|
"c2"
|
||||||
"c3"
|
"c3"
|
||||||
];
|
];
|
||||||
server_enabled = builtins.elem config.networking.hostName servers;
|
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
services.consul = {
|
options.clusterRole.consulServer = lib.mkEnableOption "Consul server mode";
|
||||||
enable = true;
|
|
||||||
webUi = true;
|
config = {
|
||||||
interface.advertise = "eno1";
|
services.consul = {
|
||||||
extraConfig = {
|
enable = true;
|
||||||
client_addr = "0.0.0.0";
|
webUi = true;
|
||||||
datacenter = "alo";
|
interface.advertise = config.networking.cluster.primaryInterface;
|
||||||
server = server_enabled;
|
extraConfig = {
|
||||||
bootstrap_expect = if server_enabled then (builtins.length servers + 2) / 2 else null;
|
client_addr = "0.0.0.0";
|
||||||
retry_join = builtins.filter (elem: elem != config.networking.hostName) servers;
|
datacenter = "alo";
|
||||||
|
server = config.clusterRole.consulServer;
|
||||||
|
bootstrap_expect = if config.clusterRole.consulServer then (builtins.length servers + 2) / 2 else null;
|
||||||
|
retry_join = builtins.filter (elem: elem != config.networking.hostName) servers;
|
||||||
telemetry = {
|
telemetry = {
|
||||||
prometheus_retention_time = "24h";
|
prometheus_retention_time = "24h";
|
||||||
disable_hostname = true;
|
disable_hostname = true;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
environment.persistence."/persist".directories = [ "/var/lib/consul" ];
|
environment.persistence.${config.custom.impermanence.persistPath}.directories = [ "/var/lib/consul" ];
|
||||||
|
|
||||||
networking.firewall = {
|
networking.firewall = {
|
||||||
allowedTCPPorts = [
|
allowedTCPPorts = [
|
||||||
8600
|
8600
|
||||||
8500
|
8500
|
||||||
8301
|
8301
|
||||||
8302
|
8302
|
||||||
8300
|
8300
|
||||||
];
|
];
|
||||||
allowedUDPPorts = [
|
allowedUDPPorts = [
|
||||||
8600
|
8600
|
||||||
8301
|
8301
|
||||||
8302
|
8302
|
||||||
];
|
];
|
||||||
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,11 +0,0 @@
|
|||||||
{ lib, ... }:
|
|
||||||
{
|
|
||||||
imports = [
|
|
||||||
./impermanence.nix # TODO: find a way to avoid needing this here
|
|
||||||
];
|
|
||||||
|
|
||||||
boot.isContainer = true;
|
|
||||||
custom.impermanence.enable = false;
|
|
||||||
custom.tailscale.enable = false;
|
|
||||||
networking.useDHCP = lib.mkForce false;
|
|
||||||
}
|
|
||||||
51
common/desktop-node.nix
Normal file
51
common/desktop-node.nix
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
{ pkgs, lib, ... }:
|
||||||
|
{
|
||||||
|
# Desktop profile: Graphical desktop with Hyprland
|
||||||
|
# Extends workstation-node with desktop environment
|
||||||
|
imports = [
|
||||||
|
./workstation-node.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
# omarchy-nix enables NetworkManager, but we use useDHCP globally
|
||||||
|
networking.networkmanager.enable = lib.mkForce false;
|
||||||
|
|
||||||
|
# Enable Hyprland (Wayland compositor)
|
||||||
|
programs.hyprland = {
|
||||||
|
enable = true;
|
||||||
|
xwayland.enable = true; # For compatibility with X11 apps if needed
|
||||||
|
};
|
||||||
|
|
||||||
|
# Essential desktop services
|
||||||
|
services.dbus.enable = true;
|
||||||
|
|
||||||
|
# polkit for privilege escalation
|
||||||
|
security.polkit.enable = true;
|
||||||
|
|
||||||
|
# Enable sound with pipewire
|
||||||
|
security.rtkit.enable = true;
|
||||||
|
services.pipewire = {
|
||||||
|
enable = true;
|
||||||
|
alsa.enable = true;
|
||||||
|
alsa.support32Bit = true;
|
||||||
|
pulse.enable = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
# Fonts
|
||||||
|
fonts.packages = with pkgs; [
|
||||||
|
noto-fonts
|
||||||
|
noto-fonts-cjk-sans
|
||||||
|
noto-fonts-emoji
|
||||||
|
liberation_ttf
|
||||||
|
fira-code
|
||||||
|
fira-code-symbols
|
||||||
|
];
|
||||||
|
|
||||||
|
# Environment variables for Wayland
|
||||||
|
environment.sessionVariables = {
|
||||||
|
NIXOS_OZONE_WL = "1"; # Hint electron apps to use Wayland
|
||||||
|
};
|
||||||
|
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
prusa-slicer
|
||||||
|
];
|
||||||
|
}
|
||||||
18
common/ethereum.nix
Normal file
18
common/ethereum.nix
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets.lighthouse_jwt = {
|
||||||
|
sopsFile = ./../secrets/${config.networking.hostName}.yaml;
|
||||||
|
};
|
||||||
|
services.ethereum.lighthouse-beacon.mainnet = {
|
||||||
|
enable = true;
|
||||||
|
#package = pkgs.unstable.lighthouse;
|
||||||
|
args = {
|
||||||
|
execution-endpoint = "http://eth1:8551";
|
||||||
|
execution-jwt = config.sops.secrets.lighthouse_jwt.path;
|
||||||
|
checkpoint-sync-url = "https://beaconstate.info";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
environment.persistence.${config.custom.impermanence.persistPath}.directories = [
|
||||||
|
"/var/lib/private/lighthouse-mainnet"
|
||||||
|
];
|
||||||
|
}
|
||||||
@@ -6,8 +6,7 @@ let
|
|||||||
btrfs = "${btrfsPkg}/bin/btrfs";
|
btrfs = "${btrfsPkg}/bin/btrfs";
|
||||||
snapshotBackup = pkgs.writeScript "kopia-snapshot-backup" (builtins.readFile ./kopia-snapshot-backup.sh);
|
snapshotBackup = pkgs.writeScript "kopia-snapshot-backup" (builtins.readFile ./kopia-snapshot-backup.sh);
|
||||||
backupScript = pkgs.writeShellScript "backup-persist" ''
|
backupScript = pkgs.writeShellScript "backup-persist" ''
|
||||||
target_path="/persist"
|
target_path="${config.custom.impermanence.persistPath}"
|
||||||
snapshot_path="$target_path/kopia-backup-snapshot"
|
|
||||||
KOPIA_CHECK_FOR_UPDATES=false
|
KOPIA_CHECK_FOR_UPDATES=false
|
||||||
|
|
||||||
${kopia} repository connect server \
|
${kopia} repository connect server \
|
||||||
@@ -16,14 +15,29 @@ let
|
|||||||
-p "$(cat ${config.sops.secrets.kopia.path})" \
|
-p "$(cat ${config.sops.secrets.kopia.path})" \
|
||||||
|| exit 1
|
|| exit 1
|
||||||
|
|
||||||
[ -e "$snapshot_path" ] && ${btrfs} subvolume delete "$snapshot_path"
|
# Check if target_path is on btrfs filesystem
|
||||||
|
fs_type=$(stat -f -c %T "$target_path")
|
||||||
|
|
||||||
${btrfs} subvolume snapshot -r "$target_path" "$snapshot_path"
|
if [ "$fs_type" = "btrfs" ]; then
|
||||||
|
# On btrfs: use snapshot for consistency
|
||||||
|
snapshot_path="$target_path/kopia-backup-snapshot"
|
||||||
|
[ -e "$snapshot_path" ] && ${btrfs} subvolume delete "$snapshot_path"
|
||||||
|
${btrfs} subvolume snapshot -r "$target_path" "$snapshot_path"
|
||||||
|
|
||||||
# --no-send-snapshot-path due to https://github.com/kopia/kopia/issues/4402
|
# --no-send-snapshot-path due to https://github.com/kopia/kopia/issues/4402
|
||||||
${kopia} snapshot create --no-send-snapshot-report --override-source "$target_path" -- "$snapshot_path"
|
# Exclude btrfs replication snapshots (they appear as empty dirs in the snapshot anyway)
|
||||||
|
${kopia} snapshot create --no-send-snapshot-report --override-source "$target_path" \
|
||||||
|
--ignore "services@*" \
|
||||||
|
--ignore "services-standby/services@*" \
|
||||||
|
-- "$snapshot_path"
|
||||||
|
|
||||||
|
${btrfs} subvolume delete "$snapshot_path"
|
||||||
|
else
|
||||||
|
# On non-btrfs (e.g., ext4): backup directly without snapshot
|
||||||
|
${kopia} snapshot create --no-send-snapshot-report --override-source "$target_path" \
|
||||||
|
-- "$target_path"
|
||||||
|
fi
|
||||||
|
|
||||||
${btrfs} subvolume delete "$snapshot_path"
|
|
||||||
${kopia} repository disconnect
|
${kopia} repository disconnect
|
||||||
'';
|
'';
|
||||||
in
|
in
|
||||||
@@ -37,7 +51,7 @@ in
|
|||||||
services."backup-persist" = {
|
services."backup-persist" = {
|
||||||
description = "Backup persistent data with Kopia";
|
description = "Backup persistent data with Kopia";
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
type = "oneshot";
|
Type = "oneshot";
|
||||||
User = "root";
|
User = "root";
|
||||||
ExecStart = "${backupScript}";
|
ExecStart = "${backupScript}";
|
||||||
};
|
};
|
||||||
|
|||||||
44
common/global/console.nix
Normal file
44
common/global/console.nix
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
{
|
||||||
|
# Configure Linux console (VT/framebuffer) colors to use Solarized Dark theme
|
||||||
|
# This affects the text-mode console accessed via Ctrl+Alt+F1-F6 or when booting without graphics
|
||||||
|
#
|
||||||
|
# Solarized Dark color scheme by Ethan Schoonover
|
||||||
|
# https://ethanschoonover.com/solarized/
|
||||||
|
#
|
||||||
|
# Color mapping:
|
||||||
|
# 0 = black -> base02 (#073642)
|
||||||
|
# 1 = red -> red (#dc322f)
|
||||||
|
# 2 = green -> green (#859900)
|
||||||
|
# 3 = yellow -> yellow (#b58900)
|
||||||
|
# 4 = blue -> blue (#268bd2)
|
||||||
|
# 5 = magenta -> magenta (#d33682)
|
||||||
|
# 6 = cyan -> cyan (#2aa198)
|
||||||
|
# 7 = white -> base2 (#eee8d5)
|
||||||
|
# 8 = br_black -> base03 (#002b36) - background
|
||||||
|
# 9 = br_red -> orange (#cb4b16)
|
||||||
|
# 10 = br_green -> base01 (#586e75)
|
||||||
|
# 11 = br_yellow -> base00 (#657b83)
|
||||||
|
# 12 = br_blue -> base0 (#839496)
|
||||||
|
# 13 = br_magenta -> violet (#6c71c4)
|
||||||
|
# 14 = br_cyan -> base1 (#93a1a1)
|
||||||
|
# 15 = br_white -> base3 (#fdf6e3)
|
||||||
|
|
||||||
|
console.colors = [
|
||||||
|
"073642" # 0: black (base02)
|
||||||
|
"dc322f" # 1: red
|
||||||
|
"859900" # 2: green
|
||||||
|
"b58900" # 3: yellow
|
||||||
|
"268bd2" # 4: blue
|
||||||
|
"d33682" # 5: magenta
|
||||||
|
"2aa198" # 6: cyan
|
||||||
|
"eee8d5" # 7: white (base2)
|
||||||
|
"002b36" # 8: bright black (base03 - Solarized Dark background)
|
||||||
|
"cb4b16" # 9: bright red (orange)
|
||||||
|
"586e75" # 10: bright green (base01)
|
||||||
|
"657b83" # 11: bright yellow (base00)
|
||||||
|
"839496" # 12: bright blue (base0)
|
||||||
|
"6c71c4" # 13: bright magenta (violet)
|
||||||
|
"93a1a1" # 14: bright cyan (base1)
|
||||||
|
"fdf6e3" # 15: bright white (base3)
|
||||||
|
];
|
||||||
|
}
|
||||||
@@ -2,8 +2,10 @@
|
|||||||
{
|
{
|
||||||
imports = [
|
imports = [
|
||||||
./backup.nix
|
./backup.nix
|
||||||
|
./console.nix
|
||||||
./cpufreq.nix
|
./cpufreq.nix
|
||||||
./flakes.nix
|
./flakes.nix
|
||||||
|
./impermanence-options.nix
|
||||||
./kernel.nix
|
./kernel.nix
|
||||||
./locale.nix
|
./locale.nix
|
||||||
./network.nix
|
./network.nix
|
||||||
|
|||||||
14
common/global/impermanence-options.nix
Normal file
14
common/global/impermanence-options.nix
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
# Define impermanence options that need to be available to all modules
|
||||||
|
# The actual impermanence implementation is in common/impermanence.nix or common/impermanence-tmpfs.nix
|
||||||
|
|
||||||
|
options.custom.impermanence.persistPath = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "/persist";
|
||||||
|
description = "Path where persistent data is stored (e.g., /persist for btrfs, /nix/persist for tmpfs)";
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,3 +1,4 @@
|
|||||||
|
{ lib, config, ... }:
|
||||||
{
|
{
|
||||||
networking = {
|
networking = {
|
||||||
useDHCP = true;
|
useDHCP = true;
|
||||||
@@ -9,7 +10,7 @@
|
|||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
environment.persistence."/persist" = {
|
environment.persistence.${config.custom.impermanence.persistPath} = {
|
||||||
directories = [ "/var/db/dhcpcd" ];
|
directories = [ "/var/db/dhcpcd" ];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,8 +1,30 @@
|
|||||||
{
|
{
|
||||||
nix.settings.trusted-users = [
|
nix.settings = {
|
||||||
"root"
|
trusted-users = [
|
||||||
"@wheel"
|
"root"
|
||||||
];
|
"@wheel"
|
||||||
|
];
|
||||||
|
|
||||||
|
# Binary cache configuration
|
||||||
|
# c3 runs ncps (Nix Cache Proxy Server) that caches cache.nixos.org
|
||||||
|
# Falls back to cache.nixos.org if c3 is unreachable
|
||||||
|
substituters = [
|
||||||
|
"http://c3.mule-stork.ts.net:8501" # Local ncps cache proxy on c3
|
||||||
|
"https://cache.nixos.org"
|
||||||
|
];
|
||||||
|
trusted-public-keys = [
|
||||||
|
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
|
||||||
|
"c3:sI3l1RN80xdehzXLA8u2P6352B0SyRPs2XiYy/YWYro="
|
||||||
|
];
|
||||||
|
|
||||||
|
# Performance tuning
|
||||||
|
max-jobs = "auto"; # Use all cores for parallel builds
|
||||||
|
cores = 0; # Each build can use all cores
|
||||||
|
max-substitution-jobs = 16; # Faster fetching from caches
|
||||||
|
http-connections = 25; # More parallel downloads
|
||||||
|
download-attempts = 3; # Retry failed downloads
|
||||||
|
};
|
||||||
|
|
||||||
nix.gc = {
|
nix.gc = {
|
||||||
automatic = true;
|
automatic = true;
|
||||||
dates = "weekly";
|
dates = "weekly";
|
||||||
|
|||||||
@@ -3,6 +3,7 @@
|
|||||||
environment.systemPackages = with pkgs; [
|
environment.systemPackages = with pkgs; [
|
||||||
age
|
age
|
||||||
file
|
file
|
||||||
|
killall
|
||||||
lm_sensors # TODO: this shouldn't be installed on cloud nodes
|
lm_sensors # TODO: this shouldn't be installed on cloud nodes
|
||||||
nodejs_20 # TODO: this is for one job on nomad, it should just be a dependency there
|
nodejs_20 # TODO: this is for one job on nomad, it should just be a dependency there
|
||||||
neovim
|
neovim
|
||||||
|
|||||||
@@ -3,8 +3,7 @@
|
|||||||
sops = {
|
sops = {
|
||||||
# sometimes the impermanence bind mount is stopped when sops needs these
|
# sometimes the impermanence bind mount is stopped when sops needs these
|
||||||
age.sshKeyPaths = [
|
age.sshKeyPaths = [
|
||||||
"/persist/etc/ssh/ssh_host_ed25519_key"
|
"${config.custom.impermanence.persistPath}/etc/ssh/ssh_host_ed25519_key"
|
||||||
"/persist/etc/ssh/ssh_host_rsa_key"
|
|
||||||
];
|
];
|
||||||
defaultSopsFile = ./../../secrets/common.yaml;
|
defaultSopsFile = ./../../secrets/common.yaml;
|
||||||
secrets = {
|
secrets = {
|
||||||
|
|||||||
@@ -22,6 +22,6 @@ in
|
|||||||
config = mkIf cfg.enable {
|
config = mkIf cfg.enable {
|
||||||
services.tailscaleAutoconnect.enable = true;
|
services.tailscaleAutoconnect.enable = true;
|
||||||
services.tailscale.package = pkgs.unstable.tailscale;
|
services.tailscale.package = pkgs.unstable.tailscale;
|
||||||
environment.persistence."/persist".directories = [ "/var/lib/tailscale" ];
|
environment.persistence.${config.custom.impermanence.persistPath}.directories = [ "/var/lib/tailscale" ];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,13 +0,0 @@
|
|||||||
{ pkgs, ... }:
|
|
||||||
{
|
|
||||||
environment.systemPackages = [ pkgs.glusterfs ];
|
|
||||||
|
|
||||||
fileSystems."/data/compute" = {
|
|
||||||
device = "192.168.1.71:/compute";
|
|
||||||
fsType = "glusterfs";
|
|
||||||
options = [
|
|
||||||
"backup-volfile-servers=192.168.1.72:192.168.1.73"
|
|
||||||
"_netdev"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
{
|
|
||||||
pkgs,
|
|
||||||
config,
|
|
||||||
lib,
|
|
||||||
...
|
|
||||||
}:
|
|
||||||
{
|
|
||||||
services.glusterfs = {
|
|
||||||
enable = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
environment.persistence."/persist".directories = [ "/var/lib/glusterd" ];
|
|
||||||
|
|
||||||
# TODO: each volume needs its own port starting at 49152
|
|
||||||
networking.firewall.allowedTCPPorts = [
|
|
||||||
24007
|
|
||||||
24008
|
|
||||||
24009
|
|
||||||
49152
|
|
||||||
49153
|
|
||||||
49154
|
|
||||||
49155
|
|
||||||
];
|
|
||||||
}
|
|
||||||
30
common/impermanence-common.nix
Normal file
30
common/impermanence-common.nix
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
{
|
||||||
|
lib,
|
||||||
|
config,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
# Common impermanence configuration shared by both btrfs and tmpfs variants
|
||||||
|
# This module should be imported by impermanence.nix or impermanence-tmpfs.nix
|
||||||
|
# The option custom.impermanence.persistPath is defined in common/global/impermanence-options.nix
|
||||||
|
|
||||||
|
environment.persistence.${config.custom.impermanence.persistPath} = {
|
||||||
|
directories = [
|
||||||
|
"/var/lib/nixos"
|
||||||
|
"/home"
|
||||||
|
];
|
||||||
|
files = [
|
||||||
|
"/etc/machine-id"
|
||||||
|
"/etc/ssh/ssh_host_ed25519_key"
|
||||||
|
"/etc/ssh/ssh_host_ed25519_key.pub"
|
||||||
|
"/etc/ssh/ssh_host_rsa_key"
|
||||||
|
"/etc/ssh/ssh_host_rsa_key.pub"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
users.mutableUsers = false;
|
||||||
|
|
||||||
|
security.sudo.extraConfig = ''
|
||||||
|
Defaults lecture = never
|
||||||
|
'';
|
||||||
|
}
|
||||||
30
common/impermanence-tmpfs.nix
Normal file
30
common/impermanence-tmpfs.nix
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
{
|
||||||
|
lib,
|
||||||
|
config,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
# Impermanence configuration for tmpfs root filesystem
|
||||||
|
# Used for systems with tmpfs root (e.g., Raspberry Pi with SD card)
|
||||||
|
# Root is in-memory and wiped on every boot
|
||||||
|
# Persistent data is stored in /nix/persist (directory on the /nix partition)
|
||||||
|
|
||||||
|
# Import common impermanence configuration
|
||||||
|
imports = [ ./impermanence-common.nix ];
|
||||||
|
|
||||||
|
config = {
|
||||||
|
# Use /nix/persist for tmpfs-based impermanence
|
||||||
|
custom.impermanence.persistPath = "/nix/persist";
|
||||||
|
|
||||||
|
# tmpfs root filesystem
|
||||||
|
fileSystems."/" = {
|
||||||
|
device = "none";
|
||||||
|
fsType = "tmpfs";
|
||||||
|
options = [
|
||||||
|
"defaults"
|
||||||
|
"size=2G"
|
||||||
|
"mode=755"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
{
|
{
|
||||||
pkgs,
|
pkgs,
|
||||||
inputs,
|
|
||||||
lib,
|
lib,
|
||||||
config,
|
config,
|
||||||
...
|
...
|
||||||
@@ -9,28 +8,22 @@ let
|
|||||||
cfg = config.custom.impermanence;
|
cfg = config.custom.impermanence;
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
|
# Import common impermanence configuration
|
||||||
|
imports = [ ./impermanence-common.nix ];
|
||||||
|
|
||||||
options.custom.impermanence = {
|
options.custom.impermanence = {
|
||||||
enable = lib.mkOption {
|
enable = lib.mkOption {
|
||||||
type = lib.types.bool;
|
type = lib.types.bool;
|
||||||
default = true;
|
default = true;
|
||||||
description = "Enable impermanent root fs";
|
description = "Enable impermanent root fs with btrfs subvolume rollback";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
config = lib.mkIf cfg.enable {
|
config = lib.mkIf cfg.enable {
|
||||||
environment.persistence = {
|
# Use /persist for btrfs-based impermanence
|
||||||
"/persist" = {
|
custom.impermanence.persistPath = "/persist";
|
||||||
directories = [ "/var/lib/nixos" ];
|
|
||||||
files = [
|
|
||||||
"/etc/machine-id"
|
|
||||||
"/etc/ssh/ssh_host_ed25519_key"
|
|
||||||
"/etc/ssh/ssh_host_ed25519_key.pub"
|
|
||||||
"/etc/ssh/ssh_host_rsa_key"
|
|
||||||
"/etc/ssh/ssh_host_rsa_key.pub"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
|
# Btrfs-specific filesystem options
|
||||||
fileSystems."/".options = [
|
fileSystems."/".options = [
|
||||||
"compress=zstd"
|
"compress=zstd"
|
||||||
"noatime"
|
"noatime"
|
||||||
@@ -50,21 +43,11 @@ in
|
|||||||
];
|
];
|
||||||
fileSystems."/var/log".neededForBoot = true;
|
fileSystems."/var/log".neededForBoot = true;
|
||||||
|
|
||||||
users.mutableUsers = false;
|
# Btrfs subvolume rollback at each boot
|
||||||
|
|
||||||
# rollback results in sudo lectures after each reboot
|
|
||||||
security.sudo.extraConfig = ''
|
|
||||||
Defaults lecture = never
|
|
||||||
'';
|
|
||||||
|
|
||||||
# needed for allowOther in the home-manager impermanence config
|
|
||||||
programs.fuse.userAllowOther = true;
|
|
||||||
|
|
||||||
# reset / at each boot
|
|
||||||
# Note `lib.mkBefore` is used instead of `lib.mkAfter` here.
|
# Note `lib.mkBefore` is used instead of `lib.mkAfter` here.
|
||||||
boot.initrd.postDeviceCommands = pkgs.lib.mkBefore ''
|
boot.initrd.postDeviceCommands = pkgs.lib.mkBefore ''
|
||||||
mkdir /mnt
|
mkdir /mnt
|
||||||
mount /dev/mapper/luksroot /mnt
|
mount ${config.fileSystems."/".device} /mnt
|
||||||
if [[ -e /mnt/root ]]; then
|
if [[ -e /mnt/root ]]; then
|
||||||
mkdir -p /mnt/old_roots
|
mkdir -p /mnt/old_roots
|
||||||
timestamp=$(date --date="@$(stat -c %Y /mnt/root)" "+%Y-%m-%-d_%H:%M:%S")
|
timestamp=$(date --date="@$(stat -c %Y /mnt/root)" "+%Y-%m-%-d_%H:%M:%S")
|
||||||
|
|||||||
13
common/minimal-node.nix
Normal file
13
common/minimal-node.nix
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
# Minimal base configuration for all NixOS systems
|
||||||
|
# Provides: SSH access, user management, boot, impermanence
|
||||||
|
# Note: unattended-encryption is NOT included by default - add it explicitly where needed
|
||||||
|
imports = [
|
||||||
|
./impermanence.nix
|
||||||
|
./resource-limits.nix
|
||||||
|
./sshd.nix
|
||||||
|
./user-ppetru.nix
|
||||||
|
./systemd-boot.nix
|
||||||
|
];
|
||||||
|
}
|
||||||
29
common/nfs-services-client.nix
Normal file
29
common/nfs-services-client.nix
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
# NFS client for /data/services
|
||||||
|
# Mounts from data-services.service.consul (Consul DNS for automatic failover)
|
||||||
|
# The NFS server registers itself in Consul, so this will automatically
|
||||||
|
# point to whichever host is currently running the NFS server
|
||||||
|
#
|
||||||
|
# Uses persistent mount (not automount) with nofail to prevent blocking boot.
|
||||||
|
# The mount is established at boot time and persists - no auto-unmount.
|
||||||
|
# This prevents issues with Docker bind mounts seeing empty automount stubs.
|
||||||
|
|
||||||
|
imports = [
|
||||||
|
./wait-for-dns-ready.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
fileSystems."/data/services" = {
|
||||||
|
device = "data-services.service.consul:/persist/services";
|
||||||
|
fsType = "nfs";
|
||||||
|
options = [
|
||||||
|
"nofail" # Don't block boot if mount fails
|
||||||
|
"x-systemd.mount-timeout=30s" # Timeout for mount attempts
|
||||||
|
"x-systemd.after=wait-for-dns-ready.service" # Wait for DNS to actually work
|
||||||
|
"_netdev" # Network filesystem (wait for network)
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# Ensure NFS client packages are available
|
||||||
|
environment.systemPackages = [ pkgs.nfs-utils ];
|
||||||
|
}
|
||||||
201
common/nfs-services-server.nix
Normal file
201
common/nfs-services-server.nix
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.nfsServicesServer;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.nfsServicesServer = {
|
||||||
|
enable = lib.mkEnableOption "NFS services server" // { default = true; };
|
||||||
|
|
||||||
|
standbys = lib.mkOption {
|
||||||
|
type = lib.types.listOf lib.types.str;
|
||||||
|
default = [];
|
||||||
|
description = ''
|
||||||
|
List of standby hostnames to replicate to (e.g. ["c1"]).
|
||||||
|
|
||||||
|
Requires one-time setup on the NFS server:
|
||||||
|
sudo mkdir -p /persist/root/.ssh
|
||||||
|
sudo ssh-keygen -t ed25519 -f /persist/root/.ssh/btrfs-replication -N "" -C "root@$(hostname)-replication"
|
||||||
|
|
||||||
|
Then add the public key to each standby's nfsServicesStandby.replicationKeys option.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = lib.mkIf cfg.enable {
|
||||||
|
# Persist root SSH directory for replication key
|
||||||
|
environment.persistence.${config.custom.impermanence.persistPath} = {
|
||||||
|
directories = [
|
||||||
|
"/root/.ssh"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# Bind mount /persist/services to /data/services for local access
|
||||||
|
# This makes the path consistent with NFS clients
|
||||||
|
# Use mkForce to override the NFS client mount from cluster-node.nix
|
||||||
|
fileSystems."/data/services" = lib.mkForce {
|
||||||
|
device = "/persist/services";
|
||||||
|
fsType = "none";
|
||||||
|
options = [ "bind" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
# Nomad node metadata: mark this as the primary storage node
|
||||||
|
# Jobs can constrain to ${meta.storage_role} = "primary"
|
||||||
|
services.nomad.settings.client.meta = {
|
||||||
|
storage_role = "primary";
|
||||||
|
};
|
||||||
|
|
||||||
|
# NFS server configuration
|
||||||
|
services.nfs.server = {
|
||||||
|
enable = true;
|
||||||
|
exports = ''
|
||||||
|
/persist/services 192.168.1.0/24(rw,sync,no_subtree_check,no_root_squash)
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
# Consul service registration for NFS
|
||||||
|
services.consul.extraConfig.services = [{
|
||||||
|
name = "data-services";
|
||||||
|
port = 2049;
|
||||||
|
checks = [{
|
||||||
|
tcp = "localhost:2049";
|
||||||
|
interval = "30s";
|
||||||
|
}];
|
||||||
|
}];
|
||||||
|
|
||||||
|
# Firewall for NFS
|
||||||
|
networking.firewall.allowedTCPPorts = [ 2049 111 20048 ];
|
||||||
|
networking.firewall.allowedUDPPorts = [ 2049 111 20048 ];
|
||||||
|
|
||||||
|
# systemd services: NFS server split-brain check + replication services
|
||||||
|
systemd.services = lib.mkMerge ([
|
||||||
|
# Safety check: prevent split-brain by ensuring no other NFS server is active
|
||||||
|
{
|
||||||
|
nfs-server = {
|
||||||
|
preStart = ''
|
||||||
|
# Wait for Consul to be available
|
||||||
|
for i in {1..30}; do
|
||||||
|
if ${pkgs.netcat}/bin/nc -z localhost 8600; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
echo "Waiting for Consul DNS... ($i/30)"
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check if another NFS server is already registered in Consul
|
||||||
|
CURRENT_SERVER=$(${pkgs.dnsutils}/bin/dig +short @localhost -p 8600 data-services.service.consul | head -1 || true)
|
||||||
|
MY_IP=$(${pkgs.iproute2}/bin/ip -4 addr show | ${pkgs.gnugrep}/bin/grep -oP '(?<=inet\s)\d+(\.\d+){3}' | ${pkgs.gnugrep}/bin/grep -v '^127\.' | head -1)
|
||||||
|
|
||||||
|
if [ -n "$CURRENT_SERVER" ] && [ "$CURRENT_SERVER" != "$MY_IP" ]; then
|
||||||
|
echo "ERROR: Another NFS server is already active at $CURRENT_SERVER"
|
||||||
|
echo "This host ($MY_IP) is configured as NFS server but should be standby."
|
||||||
|
echo "To fix:"
|
||||||
|
echo " 1. If this is intentional (failback), first demote the other server"
|
||||||
|
echo " 2. Update this host's config to use nfs-services-standby.nix instead"
|
||||||
|
echo " 3. Sync data from active server before promoting this host"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "NFS server startup check passed (no other active server found)"
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
}
|
||||||
|
] ++ (lib.forEach cfg.standbys (standby: {
|
||||||
|
"replicate-services-to-${standby}" = {
|
||||||
|
description = "Replicate /persist/services to ${standby}";
|
||||||
|
path = [ pkgs.btrfs-progs pkgs.openssh pkgs.coreutils pkgs.findutils pkgs.gnugrep pkgs.curl ];
|
||||||
|
|
||||||
|
script = ''
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
START_TIME=$(date +%s)
|
||||||
|
REPLICATION_SUCCESS=0
|
||||||
|
|
||||||
|
SSH_KEY="/persist/root/.ssh/btrfs-replication"
|
||||||
|
if [ ! -f "$SSH_KEY" ]; then
|
||||||
|
echo "ERROR: SSH key not found at $SSH_KEY"
|
||||||
|
echo "Run: sudo ssh-keygen -t ed25519 -f $SSH_KEY -N \"\" -C \"root@$(hostname)-replication\""
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
SNAPSHOT_NAME="services@$(date +%Y%m%d-%H%M%S)"
|
||||||
|
SNAPSHOT_PATH="/persist/$SNAPSHOT_NAME"
|
||||||
|
|
||||||
|
# Create readonly snapshot
|
||||||
|
btrfs subvolume snapshot -r /persist/services "$SNAPSHOT_PATH"
|
||||||
|
|
||||||
|
# Find previous snapshot on sender (sort by name since readonly snapshots have same mtime)
|
||||||
|
# Use -d to list directories only, not their contents
|
||||||
|
PREV_LOCAL=$(ls -1d /persist/services@* 2>/dev/null | grep -v "^$SNAPSHOT_PATH$" | sort -r | head -1 || true)
|
||||||
|
|
||||||
|
# Try incremental send if we have a parent, fall back to full send if it fails
|
||||||
|
if [ -n "$PREV_LOCAL" ]; then
|
||||||
|
echo "Attempting incremental send from $(basename $PREV_LOCAL) to ${standby}"
|
||||||
|
|
||||||
|
# Try incremental send, if it fails (e.g., parent missing on receiver), fall back to full
|
||||||
|
# Use -c to help with broken Received UUID chains
|
||||||
|
if btrfs send -p "$PREV_LOCAL" -c "$PREV_LOCAL" "$SNAPSHOT_PATH" | \
|
||||||
|
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
|
||||||
|
"btrfs receive /persist/services-standby"; then
|
||||||
|
echo "Incremental send completed successfully"
|
||||||
|
REPLICATION_SUCCESS=1
|
||||||
|
else
|
||||||
|
echo "Incremental send failed (likely missing parent on receiver), falling back to full send"
|
||||||
|
# Plain full send without clone source (receiver may have no snapshots)
|
||||||
|
btrfs send "$SNAPSHOT_PATH" | \
|
||||||
|
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
|
||||||
|
"btrfs receive /persist/services-standby"
|
||||||
|
REPLICATION_SUCCESS=1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# First snapshot, do full send
|
||||||
|
echo "Full send to ${standby} (first snapshot)"
|
||||||
|
btrfs send "$SNAPSHOT_PATH" | \
|
||||||
|
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
|
||||||
|
"btrfs receive /persist/services-standby"
|
||||||
|
REPLICATION_SUCCESS=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Cleanup old snapshots on sender (keep last 10 snapshots, sorted by name/timestamp)
|
||||||
|
ls -1d /persist/services@* 2>/dev/null | sort | head -n -10 | xargs -r btrfs subvolume delete
|
||||||
|
|
||||||
|
# Calculate metrics
|
||||||
|
END_TIME=$(date +%s)
|
||||||
|
DURATION=$((END_TIME - START_TIME))
|
||||||
|
SNAPSHOT_COUNT=$(ls -1d /persist/services@* 2>/dev/null | wc -l)
|
||||||
|
|
||||||
|
# Push metrics to Prometheus pushgateway
|
||||||
|
cat <<METRICS | curl -s --data-binary @- http://pushgateway.service.consul:9091/metrics/job/nfs_replication/instance/${standby} || true
|
||||||
|
# TYPE nfs_replication_last_success_timestamp gauge
|
||||||
|
nfs_replication_last_success_timestamp $END_TIME
|
||||||
|
# TYPE nfs_replication_duration_seconds gauge
|
||||||
|
nfs_replication_duration_seconds $DURATION
|
||||||
|
# TYPE nfs_replication_snapshot_count gauge
|
||||||
|
nfs_replication_snapshot_count $SNAPSHOT_COUNT
|
||||||
|
# TYPE nfs_replication_success gauge
|
||||||
|
nfs_replication_success $REPLICATION_SUCCESS
|
||||||
|
METRICS
|
||||||
|
'';
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
User = "root";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}))
|
||||||
|
);
|
||||||
|
|
||||||
|
systemd.timers = lib.mkMerge (
|
||||||
|
lib.forEach cfg.standbys (standby: {
|
||||||
|
"replicate-services-to-${standby}" = {
|
||||||
|
description = "Timer for replicating /persist/services to ${standby}";
|
||||||
|
wantedBy = [ "timers.target" ];
|
||||||
|
timerConfig = {
|
||||||
|
OnCalendar = "*:0/5"; # Every 5 minutes
|
||||||
|
Persistent = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
})
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
79
common/nfs-services-standby.nix
Normal file
79
common/nfs-services-standby.nix
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.nfsServicesStandby;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.nfsServicesStandby = {
|
||||||
|
enable = lib.mkEnableOption "NFS services standby" // { default = true; };
|
||||||
|
|
||||||
|
replicationKeys = lib.mkOption {
|
||||||
|
type = lib.types.listOf lib.types.str;
|
||||||
|
default = [];
|
||||||
|
description = ''
|
||||||
|
SSH public keys authorized to replicate btrfs snapshots to this standby.
|
||||||
|
These keys are restricted to only run 'btrfs receive /persist/services-standby'.
|
||||||
|
|
||||||
|
Get the public key from the NFS server:
|
||||||
|
ssh <nfs-server> sudo cat /persist/root/.ssh/btrfs-replication.pub
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = lib.mkIf cfg.enable {
|
||||||
|
# Allow root SSH login for replication (restricted by command= in authorized_keys)
|
||||||
|
# This is configured in common/sshd.nix
|
||||||
|
|
||||||
|
# Restricted SSH keys for btrfs replication
|
||||||
|
users.users.root.openssh.authorizedKeys.keys =
|
||||||
|
map (key: ''command="btrfs receive /persist/services-standby",restrict ${key}'') cfg.replicationKeys;
|
||||||
|
|
||||||
|
# Mount point for services-standby subvolume
|
||||||
|
# This is just declarative documentation - the subvolume must be created manually once:
|
||||||
|
# sudo btrfs subvolume create /persist/services-standby
|
||||||
|
# After that, it will persist across reboots (it's under /persist)
|
||||||
|
fileSystems."/persist/services-standby" = {
|
||||||
|
device = "/persist/services-standby";
|
||||||
|
fsType = "none";
|
||||||
|
options = [ "bind" ];
|
||||||
|
noCheck = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
# Cleanup old snapshots on standby (keep last 10 snapshots)
|
||||||
|
systemd.services.cleanup-services-standby-snapshots = {
|
||||||
|
description = "Cleanup old btrfs snapshots in services-standby";
|
||||||
|
path = [ pkgs.btrfs-progs pkgs.findutils pkgs.coreutils pkgs.curl ];
|
||||||
|
script = ''
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Cleanup old snapshots on standby (keep last 10 snapshots, sorted by name/timestamp)
|
||||||
|
ls -1d /persist/services-standby/services@* 2>/dev/null | sort | head -n -10 | xargs -r btrfs subvolume delete || true
|
||||||
|
|
||||||
|
# Calculate metrics
|
||||||
|
CLEANUP_TIME=$(date +%s)
|
||||||
|
SNAPSHOT_COUNT=$(ls -1d /persist/services-standby/services@* 2>/dev/null | wc -l)
|
||||||
|
|
||||||
|
# Push metrics to Prometheus pushgateway
|
||||||
|
cat <<METRICS | curl -s --data-binary @- http://pushgateway.service.consul:9091/metrics/job/nfs_standby_cleanup/instance/$(hostname) || true
|
||||||
|
# TYPE nfs_standby_snapshot_count gauge
|
||||||
|
nfs_standby_snapshot_count $SNAPSHOT_COUNT
|
||||||
|
# TYPE nfs_standby_cleanup_last_run_timestamp gauge
|
||||||
|
nfs_standby_cleanup_last_run_timestamp $CLEANUP_TIME
|
||||||
|
METRICS
|
||||||
|
'';
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
User = "root";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.timers.cleanup-services-standby-snapshots = {
|
||||||
|
description = "Timer for cleaning up old snapshots on standby";
|
||||||
|
wantedBy = [ "timers.target" ];
|
||||||
|
timerConfig = {
|
||||||
|
OnCalendar = "hourly";
|
||||||
|
Persistent = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
9
common/nomad-server.nix
Normal file
9
common/nomad-server.nix
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
# Enable server mode for both Consul and Nomad
|
||||||
|
# Used by: c1, c2, c3 (quorum members)
|
||||||
|
clusterRole = {
|
||||||
|
consulServer = true;
|
||||||
|
nomadServer = true;
|
||||||
|
};
|
||||||
|
}
|
||||||
9
common/nomad-worker.nix
Normal file
9
common/nomad-worker.nix
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
# Enable Nomad client to run workloads
|
||||||
|
# Includes: Nomad client, Docker plugin, host volumes, NFS mount dependencies
|
||||||
|
# Used by: c1, c2, c3, zippy (all nodes that run Nomad jobs)
|
||||||
|
imports = [
|
||||||
|
./nomad.nix
|
||||||
|
];
|
||||||
|
}
|
||||||
234
common/nomad.nix
234
common/nomad.nix
@@ -1,109 +1,177 @@
|
|||||||
# inspiration: https://github.com/astro/skyflake/blob/main/nixos-modules/nomad.nix
|
# inspiration: https://github.com/astro/skyflake/blob/main/nixos-modules/nomad.nix
|
||||||
{ pkgs, config, ... }:
|
{ pkgs, config, lib, ... }:
|
||||||
let
|
let
|
||||||
servers = [
|
servers = [
|
||||||
"c1"
|
"c1"
|
||||||
"c2"
|
"c2"
|
||||||
"c3"
|
"c3"
|
||||||
];
|
];
|
||||||
server_enabled = builtins.elem config.networking.hostName servers;
|
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
services.nomad = {
|
options.clusterRole.nomadServer = lib.mkEnableOption "Nomad server mode";
|
||||||
enable = true;
|
|
||||||
# true breaks at least CSI volumes
|
|
||||||
# TODO: consider fixing
|
|
||||||
dropPrivileges = false;
|
|
||||||
|
|
||||||
settings = {
|
config = {
|
||||||
datacenter = "alo";
|
services.nomad = {
|
||||||
|
enable = true;
|
||||||
|
# true breaks at least CSI volumes
|
||||||
|
# TODO: consider fixing
|
||||||
|
dropPrivileges = false;
|
||||||
|
|
||||||
client = {
|
settings = {
|
||||||
enabled = true;
|
datacenter = "alo";
|
||||||
server_join.retry_join = servers;
|
|
||||||
host_network.tailscale = {
|
client = {
|
||||||
interface = "tailscale0";
|
enabled = true;
|
||||||
cidr = "100.64.0.0/10";
|
server_join.retry_join = servers;
|
||||||
|
host_network.tailscale = {
|
||||||
|
interface = "tailscale0";
|
||||||
|
cidr = "100.64.0.0/10";
|
||||||
|
};
|
||||||
|
host_volume = {
|
||||||
|
services = {
|
||||||
|
path = "/data/services";
|
||||||
|
read_only = false;
|
||||||
|
};
|
||||||
|
nix-store = {
|
||||||
|
path = "/nix/store";
|
||||||
|
read_only = true;
|
||||||
|
};
|
||||||
|
sw = {
|
||||||
|
path = "/run/current-system/sw";
|
||||||
|
read_only = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
};
|
};
|
||||||
host_volume = {
|
|
||||||
code = {
|
server = {
|
||||||
path = "/data/compute/code";
|
enabled = config.clusterRole.nomadServer;
|
||||||
read_only = true;
|
bootstrap_expect = (builtins.length servers + 2) / 2;
|
||||||
};
|
server_join.retry_join = servers;
|
||||||
appdata = {
|
};
|
||||||
path = "/data/compute/appdata";
|
|
||||||
read_only = false;
|
telemetry = {
|
||||||
};
|
collection_interval = "1s";
|
||||||
nix-store = {
|
disable_hostname = true;
|
||||||
path = "/nix/store";
|
prometheus_metrics = true;
|
||||||
read_only = true;
|
publish_allocation_metrics = true;
|
||||||
};
|
publish_node_metrics = true;
|
||||||
sw = {
|
|
||||||
path = "/run/current-system/sw";
|
|
||||||
read_only = true;
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
server = {
|
extraSettingsPaths = [ "/etc/nomad-alo.json" ];
|
||||||
enabled = server_enabled;
|
};
|
||||||
bootstrap_expect = (builtins.length servers + 2) / 2;
|
|
||||||
server_join.retry_join = servers;
|
|
||||||
};
|
|
||||||
|
|
||||||
telemetry = {
|
# NFS mount dependency configuration for Nomad:
|
||||||
collection_interval = "1s";
|
#
|
||||||
disable_hostname = true;
|
# Problem: Docker bind mounts need the real NFS mount, not an empty stub.
|
||||||
prometheus_metrics = true;
|
# If Nomad starts before NFS is mounted, containers get empty directories.
|
||||||
publish_allocation_metrics = true;
|
#
|
||||||
publish_node_metrics = true;
|
# Solution: Use soft dependencies (wants/after) with health-checking recovery.
|
||||||
|
# - wants: Nomad wants the mount, but won't be killed if it goes away
|
||||||
|
# - after: Nomad waits for mount to be attempted before starting
|
||||||
|
# - ExecStartPre with findmnt: Blocks Nomad start until mount is actually active
|
||||||
|
#
|
||||||
|
# This prevents Docker race conditions while allowing:
|
||||||
|
# - Boot to proceed if NFS unavailable (Nomad fails to start, systemd retries)
|
||||||
|
# - Nomad to keep running if NFS temporarily fails (containers may error)
|
||||||
|
# - Recovery service to auto-restart Nomad when NFS comes back or becomes stale
|
||||||
|
#
|
||||||
|
# Note: Mount uses Consul DNS which resolves at mount time. If NFS server
|
||||||
|
# moves to different IP, mount becomes stale and needs remount.
|
||||||
|
# The recovery service handles this by detecting stale mounts and restarting Nomad.
|
||||||
|
systemd.services.nomad = {
|
||||||
|
wants = [ "network-online.target" "data-services.mount" ];
|
||||||
|
after = [ "data-services.mount" ];
|
||||||
|
serviceConfig.ExecStartPre = "${pkgs.util-linux}/bin/findmnt --mountpoint /data/services";
|
||||||
|
};
|
||||||
|
|
||||||
|
# Recovery service: automatically restart Nomad when NFS mount needs attention
|
||||||
|
# This handles scenarios where:
|
||||||
|
# - NFS server was down during boot (mount failed, Nomad hit start-limit)
|
||||||
|
# - NFS server failed over to different host with new IP (mount went stale)
|
||||||
|
# - Network outage temporarily broke the mount
|
||||||
|
#
|
||||||
|
# The timer runs every 30s and checks:
|
||||||
|
# 1. Is mount healthy (exists and accessible)?
|
||||||
|
# 2. If mount is stale/inaccessible → restart Nomad (triggers remount)
|
||||||
|
# 3. If mount is healthy but Nomad failed → restart Nomad (normal recovery)
|
||||||
|
systemd.services.nomad-mount-watcher = {
|
||||||
|
description = "Restart Nomad when NFS mount needs attention";
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
ExecStart = pkgs.writeShellScript "nomad-mount-watcher" ''
|
||||||
|
# Check if mount point exists
|
||||||
|
if ! ${pkgs.util-linux}/bin/findmnt --mountpoint /data/services >/dev/null 2>&1; then
|
||||||
|
exit 0 # Mount not present, nothing to do
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if mount is actually accessible (not stale)
|
||||||
|
# Use timeout to avoid hanging on stale NFS mounts
|
||||||
|
if ! ${pkgs.coreutils}/bin/timeout 5s ${pkgs.coreutils}/bin/stat /data/services >/dev/null 2>&1; then
|
||||||
|
echo "NFS mount is stale or inaccessible. Restarting Nomad to trigger remount..."
|
||||||
|
${pkgs.systemd}/bin/systemctl restart nomad.service
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mount is healthy - check if Nomad needs recovery
|
||||||
|
if ${pkgs.systemd}/bin/systemctl is-failed nomad.service >/dev/null 2>&1; then
|
||||||
|
echo "NFS mount is healthy but Nomad is failed. Restarting Nomad..."
|
||||||
|
${pkgs.systemd}/bin/systemctl restart nomad.service
|
||||||
|
fi
|
||||||
|
'';
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
extraSettingsPaths = [ "/etc/nomad-alo.json" ];
|
systemd.timers.nomad-mount-watcher = {
|
||||||
};
|
description = "Timer for Nomad mount watcher";
|
||||||
|
wantedBy = [ "timers.target" ];
|
||||||
systemd.services.nomad.wants = [ "network-online.target" ];
|
timerConfig = {
|
||||||
|
OnBootSec = "1min"; # First run 1min after boot
|
||||||
environment.etc."nomad-alo.json".text = builtins.toJSON {
|
OnUnitActiveSec = "30s"; # Then every 30s
|
||||||
plugin.docker.config = {
|
Unit = "nomad-mount-watcher.service";
|
||||||
allow_privileged = true;
|
};
|
||||||
# for keepalived, though only really needing "NET_ADMIN","NET_BROADCAST","NET_RAW" on top of default
|
|
||||||
# TODO: trim this down
|
|
||||||
allow_caps = [ "all" ];
|
|
||||||
volumes.enabled = true;
|
|
||||||
extra_labels = [
|
|
||||||
"job_name"
|
|
||||||
"task_group_name"
|
|
||||||
"task_name"
|
|
||||||
"node_name"
|
|
||||||
];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
plugin.raw_exec.config.enabled = true;
|
environment.etc."nomad-alo.json".text = builtins.toJSON {
|
||||||
};
|
plugin.docker.config = {
|
||||||
|
allow_privileged = true;
|
||||||
|
# for keepalived, though only really needing "NET_ADMIN","NET_BROADCAST","NET_RAW" on top of default
|
||||||
|
# TODO: trim this down
|
||||||
|
allow_caps = [ "all" ];
|
||||||
|
volumes.enabled = true;
|
||||||
|
extra_labels = [
|
||||||
|
"job_name"
|
||||||
|
"task_group_name"
|
||||||
|
"task_name"
|
||||||
|
"node_name"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
environment.persistence."/persist".directories = [
|
plugin.raw_exec.config.enabled = true;
|
||||||
"/var/lib/docker"
|
};
|
||||||
"/var/lib/nomad"
|
|
||||||
];
|
|
||||||
|
|
||||||
environment.systemPackages = with pkgs; [
|
environment.persistence.${config.custom.impermanence.persistPath}.directories = [
|
||||||
nomad
|
"/var/lib/docker"
|
||||||
wander
|
"/var/lib/nomad"
|
||||||
damon
|
];
|
||||||
];
|
|
||||||
|
|
||||||
networking.firewall = {
|
environment.systemPackages = with pkgs; [
|
||||||
allowedTCPPorts =
|
nomad
|
||||||
if server_enabled then
|
wander
|
||||||
[
|
damon
|
||||||
4646
|
];
|
||||||
4647
|
|
||||||
4648
|
networking.firewall = {
|
||||||
]
|
allowedTCPPorts =
|
||||||
else
|
if config.clusterRole.nomadServer then
|
||||||
[ 4646 ];
|
[
|
||||||
allowedUDPPorts = if server_enabled then [ 4648 ] else [ ];
|
4646
|
||||||
|
4647
|
||||||
|
4648
|
||||||
|
]
|
||||||
|
else
|
||||||
|
[ 4646 ];
|
||||||
|
allowedUDPPorts = if config.clusterRole.nomadServer then [ 4648 ] else [ ];
|
||||||
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
44
common/resource-limits.nix
Normal file
44
common/resource-limits.nix
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
# Resource limits for user sessions to prevent system wedging
|
||||||
|
#
|
||||||
|
# Modern systemd/cgroups v2 approach to resource control (replaces ulimits).
|
||||||
|
# Limits apply to all user sessions (SSH, GUI, etc.) but NOT to system services.
|
||||||
|
#
|
||||||
|
# Rationale:
|
||||||
|
# - Prevents runaway user processes (nix builds, compiles, etc.) from consuming
|
||||||
|
# all resources and making the system unresponsive
|
||||||
|
# - System services (Nomad jobs, Consul, NFS, etc.) run outside user.slice and
|
||||||
|
# are unaffected by these limits
|
||||||
|
# - Ensures SSH access remains responsive even under heavy load
|
||||||
|
#
|
||||||
|
# CPU: Uses CPUWeight (not CPUQuota) so user sessions can use 100% when idle,
|
||||||
|
# but system services get priority (1.25x) during contention
|
||||||
|
# Memory: Soft limit at 90% (triggers pressure/reclaim), hard limit at 95%
|
||||||
|
# Gives 5% warning buffer before OOM kills
|
||||||
|
|
||||||
|
systemd.slices.user = {
|
||||||
|
sliceConfig = {
|
||||||
|
# CPU weight: 80 vs default 100 for system services
|
||||||
|
# When idle: user sessions use all available CPU
|
||||||
|
# Under contention: system services get 1.25x CPU share
|
||||||
|
CPUWeight = "80";
|
||||||
|
|
||||||
|
# Memory soft limit: triggers reclaim and memory pressure
|
||||||
|
# User will notice slowdown but processes keep running
|
||||||
|
MemoryHigh = "90%";
|
||||||
|
|
||||||
|
# Memory hard limit: OOM killer targets user.slice
|
||||||
|
# 5% buffer between MemoryHigh and MemoryMax provides warning
|
||||||
|
MemoryMax = "95%";
|
||||||
|
|
||||||
|
# Limit number of tasks (processes/threads)
|
||||||
|
# Prevents fork bombs while still allowing nix builds
|
||||||
|
TasksMax = "4096";
|
||||||
|
|
||||||
|
# Lower I/O priority slightly
|
||||||
|
# System services get preference during I/O contention
|
||||||
|
IOWeight = "90";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -3,8 +3,7 @@
|
|||||||
enable = true;
|
enable = true;
|
||||||
allowSFTP = true;
|
allowSFTP = true;
|
||||||
settings = {
|
settings = {
|
||||||
PasswordAuthentication = false;
|
PermitRootLogin = "prohibit-password"; # Allow root login with SSH keys only
|
||||||
KbdInteractiveAuthentication = false;
|
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -1,53 +0,0 @@
|
|||||||
{
|
|
||||||
# TODO: when deploying this to a new machine for the first time, first
|
|
||||||
# comment this out to get /data/sync created with the right owner and
|
|
||||||
# permissions. then, do it again with persistence enabled.
|
|
||||||
# This could list the owner user but I'm not sure if it's already created at
|
|
||||||
# the time impermanence setup runs.
|
|
||||||
# Note: chown syncthing:syncthing /data/sync && chmod 700 /data/sync also seems to work
|
|
||||||
environment.persistence."/persist".directories = [ "/data/sync" ];
|
|
||||||
|
|
||||||
services.syncthing = {
|
|
||||||
enable = true;
|
|
||||||
dataDir = "/data/sync";
|
|
||||||
openDefaultPorts = true;
|
|
||||||
#guiAddress = "0.0.0.0:8384";
|
|
||||||
overrideDevices = true;
|
|
||||||
overrideFolders = true;
|
|
||||||
settings = {
|
|
||||||
devices = {
|
|
||||||
"c1" = {
|
|
||||||
id = "XJECP3R-AZHCAYX-UIQKHIT-AHBK5UM-KM7T4OV-B7SEUCW-ZE2UFPG-7BNAIQZ";
|
|
||||||
};
|
|
||||||
"c2" = {
|
|
||||||
id = "Z3D476N-PUV6WAD-DSJWVBO-TWEOD4I-KDDMNRB-QEBOP6T-BYPGYTX-RAAYGAW";
|
|
||||||
};
|
|
||||||
"c3" = {
|
|
||||||
id = "D3C3YII-A3QGUNF-LHOGZNX-GJ4ZF3X-VVLMNY5-BBKF3BO-KNHKJMD-EA5QYQJ";
|
|
||||||
};
|
|
||||||
"zippy" = {
|
|
||||||
id = "WXDYZWN-JG2OBQH-CC42RMM-LPJGTS6-Y2BV37J-TYSLHL4-VHGYL5M-URI42QJ";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
folders = {
|
|
||||||
"wordpress" = {
|
|
||||||
path = "/data/sync/wordpress";
|
|
||||||
devices = [
|
|
||||||
"c1"
|
|
||||||
"c2"
|
|
||||||
"c3"
|
|
||||||
"zippy"
|
|
||||||
];
|
|
||||||
ignorePerms = false;
|
|
||||||
versioning = {
|
|
||||||
type = "staggered";
|
|
||||||
params = {
|
|
||||||
cleanInterval = "3600";
|
|
||||||
maxAge = "15768000";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,7 +1,9 @@
|
|||||||
|
{ pkgs, lib, ... }:
|
||||||
{
|
{
|
||||||
boot.loader.systemd-boot = {
|
boot.loader.systemd-boot = {
|
||||||
enable = true;
|
enable = true;
|
||||||
configurationLimit = 5;
|
configurationLimit = 5;
|
||||||
|
memtest86.enable = lib.mkIf (pkgs.system == "x86_64-linux") true;
|
||||||
};
|
};
|
||||||
boot.loader.efi.canTouchEfiVariables = true;
|
boot.loader.efi.canTouchEfiVariables = true;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,8 +15,9 @@
|
|||||||
openssh.authorizedKeys.keys = [
|
openssh.authorizedKeys.keys = [
|
||||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCdZ9dHN+DamoyRAIS8v7Ph85KyJ9zYdgwoqkp7F+smEJEdDKboHE5LA49IDQk4cgkR5xNEMtxANpJm+AXNAhQOPVl/w57vI/Z+TBtSvDoj8LuAvKjmmrPfok2iyD2IIlbctcw8ypn1revZwDb1rBFefpbbZdr5h+75tVqqmNebzxk6UQsfL++lU8HscWwYKzxrrom5aJL6wxNTfy7/Htkt4FHzoKAc5gcB2KM/q0s6NvZzX9WtdHHwAR1kib2EekssjDM9VLecX75Xhtbp+LrHOJKRnxbIanXos4UZUzaJctdNTcOYzEVLvV0BCYaktbI+uVvJcC0qo28bXbHdS3rTGRu8CsykFneJXnrrRIJw7mYWhJSTV9bf+6j/lnFNAurbiYmd4SzaTgbGjj2j38Gr/CTsyv8Rho7P3QUWbRRZnn4a7eVPtjGagqwIwS59YDxRcOy2Wdsw35ry/N2G802V7Cr3hUqeaAIev2adtn4FaG72C8enacYUeACPEhi7TYdsDzuuyt31W7AQa5Te4Uda20rTa0Y9N5Lw85uGB2ebbdYWlO2CqI/m+xNYcPkKqL7zZILz782jDw1sxWd/RUbEgJNrWjsKZ7ybiEMmhpw5vLiMGOeqQWIT6cBCNjocmW0ocU+FBLhhioyrvuZOyacoEZLoklatsL0DMkvvkbT0Ew== petru@paler.net"
|
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCdZ9dHN+DamoyRAIS8v7Ph85KyJ9zYdgwoqkp7F+smEJEdDKboHE5LA49IDQk4cgkR5xNEMtxANpJm+AXNAhQOPVl/w57vI/Z+TBtSvDoj8LuAvKjmmrPfok2iyD2IIlbctcw8ypn1revZwDb1rBFefpbbZdr5h+75tVqqmNebzxk6UQsfL++lU8HscWwYKzxrrom5aJL6wxNTfy7/Htkt4FHzoKAc5gcB2KM/q0s6NvZzX9WtdHHwAR1kib2EekssjDM9VLecX75Xhtbp+LrHOJKRnxbIanXos4UZUzaJctdNTcOYzEVLvV0BCYaktbI+uVvJcC0qo28bXbHdS3rTGRu8CsykFneJXnrrRIJw7mYWhJSTV9bf+6j/lnFNAurbiYmd4SzaTgbGjj2j38Gr/CTsyv8Rho7P3QUWbRRZnn4a7eVPtjGagqwIwS59YDxRcOy2Wdsw35ry/N2G802V7Cr3hUqeaAIev2adtn4FaG72C8enacYUeACPEhi7TYdsDzuuyt31W7AQa5Te4Uda20rTa0Y9N5Lw85uGB2ebbdYWlO2CqI/m+xNYcPkKqL7zZILz782jDw1sxWd/RUbEgJNrWjsKZ7ybiEMmhpw5vLiMGOeqQWIT6cBCNjocmW0ocU+FBLhhioyrvuZOyacoEZLoklatsL0DMkvvkbT0Ew== petru@paler.net"
|
||||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+QbeQG/gTPJ2sIMPgZ3ZPEirVo5qX/carbZMKt50YN petru@happy"
|
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+QbeQG/gTPJ2sIMPgZ3ZPEirVo5qX/carbZMKt50YN petru@happy"
|
||||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDZjL47pUIks2caErnbFYv+McJcWd+GSydzAXHZEtL8s JuiceSSH"
|
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOOQ2EcJ+T+7BItZl89oDYhq7ZW4B9KuQVCy2DuQaPKR ppetru@sparky"
|
||||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINqULSU2VWUXSrHzFhs9pdXWZPtP/RS9gx7zz/zD/GDG petru@Workshop"
|
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFRYVOfrqk2nFSyiu7TzU23ql8D6TfXICFpMIEvPbNsc JuiceSSH"
|
||||||
|
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINBIqK6+aPIbmviJPWP8PI/k8GmaC7RO8v2ENnsK8sJx ppetru@beefy"
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
55
common/wait-for-dns-ready.nix
Normal file
55
common/wait-for-dns-ready.nix
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
# Service to wait for DNS resolution to be actually functional
|
||||||
|
# This is needed because network-online.target and wait-online.service
|
||||||
|
# don't guarantee DNS works - they only check that interfaces are configured.
|
||||||
|
#
|
||||||
|
# Problem: NFS mounts using Consul DNS names (data-services.service.consul)
|
||||||
|
# fail at boot because DNS resolution isn't ready even though network is "online"
|
||||||
|
#
|
||||||
|
# Solution: Actively test DNS resolution before considering network truly ready
|
||||||
|
|
||||||
|
systemd.services.wait-for-dns-ready = {
|
||||||
|
description = "Wait for DNS resolution to be functional";
|
||||||
|
after = [
|
||||||
|
"systemd-networkd-wait-online.service"
|
||||||
|
"systemd-resolved.service"
|
||||||
|
"network-online.target"
|
||||||
|
];
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
RemainAfterExit = true;
|
||||||
|
ExecStart = pkgs.writeShellScript "wait-for-dns-ready" ''
|
||||||
|
# Test DNS resolution by attempting to resolve data-services.service.consul
|
||||||
|
# This ensures the full DNS path works: interface → gateway → Consul DNS
|
||||||
|
|
||||||
|
echo "Waiting for DNS resolution to be ready..."
|
||||||
|
|
||||||
|
for i in {1..30}; do
|
||||||
|
# Use getent which respects /etc/nsswitch.conf and systemd-resolved
|
||||||
|
if ${pkgs.glibc.bin}/bin/getent hosts data-services.service.consul >/dev/null 2>&1; then
|
||||||
|
echo "DNS ready: data-services.service.consul resolved successfully"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Also test a public DNS name to distinguish between general DNS failure
|
||||||
|
# vs Consul-specific issues (helpful for debugging)
|
||||||
|
if ! ${pkgs.glibc.bin}/bin/getent hosts www.google.com >/dev/null 2>&1; then
|
||||||
|
echo "Attempt $i/30: General DNS not working yet, waiting..."
|
||||||
|
else
|
||||||
|
echo "Attempt $i/30: General DNS works but Consul DNS not ready yet, waiting..."
|
||||||
|
fi
|
||||||
|
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "Warning: DNS not fully ready after 30 seconds"
|
||||||
|
echo "NFS mounts with 'nofail' option will handle this gracefully"
|
||||||
|
exit 0 # Don't block boot - let nofail mounts handle DNS failures
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
35
common/wifi.nix
Normal file
35
common/wifi.nix
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
{ config, lib, ... }:
|
||||||
|
{
|
||||||
|
sops.secrets.wifi-password-pi = {
|
||||||
|
sopsFile = ./../secrets/wifi.yaml;
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.wireless = {
|
||||||
|
enable = true;
|
||||||
|
secretsFile = config.sops.secrets.wifi-password-pi.path;
|
||||||
|
networks = {
|
||||||
|
"pi" = {
|
||||||
|
pskRaw = "ext:pi";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
# Only enable on wireless interface, not ethernet
|
||||||
|
interfaces = [ "wlan0" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
# Prefer wifi over ethernet, but keep ethernet as fallback
|
||||||
|
networking.dhcpcd.extraConfig = ''
|
||||||
|
# Prefer wlan0 over ethernet interfaces
|
||||||
|
interface wlan0
|
||||||
|
metric 100
|
||||||
|
|
||||||
|
interface eth0
|
||||||
|
metric 200
|
||||||
|
'';
|
||||||
|
|
||||||
|
# Persist wireless configuration across reboots (for impermanence)
|
||||||
|
environment.persistence.${config.custom.impermanence.persistPath} = {
|
||||||
|
files = [
|
||||||
|
"/etc/wpa_supplicant.conf"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,5 +1,12 @@
|
|||||||
{ pkgs, inputs, ... }:
|
{ pkgs, inputs, ... }:
|
||||||
{
|
{
|
||||||
|
# Workstation profile: Development workstation configuration
|
||||||
|
# Adds development tools and emulation on top of minimal-node
|
||||||
|
imports = [
|
||||||
|
./minimal-node.nix
|
||||||
|
./unattended-encryption.nix
|
||||||
|
];
|
||||||
|
|
||||||
environment.systemPackages = with pkgs; [
|
environment.systemPackages = with pkgs; [
|
||||||
wget
|
wget
|
||||||
deploy-rs
|
deploy-rs
|
||||||
55
docs/AUTH_SETUP.md
Normal file
55
docs/AUTH_SETUP.md
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
# Authentication Setup
|
||||||
|
|
||||||
|
SSO for homelab services using OIDC.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
**Pocket ID** (`pocket-id.v.paler.net`) - Lightweight OIDC provider, data in `/data/services/pocket-id`
|
||||||
|
|
||||||
|
**Traefik** - Uses `traefik-oidc-auth` plugin (v0.16.0) to protect services
|
||||||
|
- Plugin downloaded from GitHub at startup, cached in `/data/services/traefik/plugins-storage`
|
||||||
|
- Middleware config in `/data/services/traefik/rules/middlewares.yml`
|
||||||
|
- Protected services add tag: `traefik.http.routers.<name>.middlewares=oidc-auth@file`
|
||||||
|
|
||||||
|
## Flow
|
||||||
|
|
||||||
|
1. User hits protected service → Traefik intercepts
|
||||||
|
2. Redirects to Pocket ID for login
|
||||||
|
3. Pocket ID returns OIDC token
|
||||||
|
4. Traefik validates and forwards with `X-Oidc-Username` header
|
||||||
|
|
||||||
|
## Protected Services
|
||||||
|
|
||||||
|
Use `oidc-auth@file` middleware (grep codebase for full list):
|
||||||
|
- Wikis (TiddlyWiki instances)
|
||||||
|
- Media stack (Radarr, Sonarr, Plex, etc.)
|
||||||
|
- Infrastructure (Traefik dashboard, Loki, Jupyter, Unifi)
|
||||||
|
|
||||||
|
## Key Files
|
||||||
|
|
||||||
|
- `services/pocket-id.hcl` - OIDC provider
|
||||||
|
- `services/traefik.hcl` - Plugin declaration
|
||||||
|
- `/data/services/traefik/rules/middlewares.yml` - Middleware definitions (oidc-auth, simple-auth fallback)
|
||||||
|
|
||||||
|
## Cold Start Notes
|
||||||
|
|
||||||
|
- Traefik needs internet to download plugin on first start
|
||||||
|
- Pocket ID needs `/data/services` NFS mounted
|
||||||
|
- Pocket ID down = all protected services inaccessible
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
**Infinite redirects**: Check `TRUST_PROXY=true` on Pocket ID
|
||||||
|
|
||||||
|
**Plugin not loading**: Clear cache in `/data/services/traefik/plugins-storage/`, restart Traefik
|
||||||
|
|
||||||
|
**401 after login**: Verify client ID/secret in middlewares.yml matches Pocket ID client config
|
||||||
|
|
||||||
|
## Migration History
|
||||||
|
|
||||||
|
- Previous: Authentik with forwardAuth (removed Nov 2024)
|
||||||
|
- Current: Pocket ID + traefik-oidc-auth (simpler, lighter)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Manage users/clients via Pocket ID UI. Basic auth fallback available via `simple-auth` middleware.*
|
||||||
1717
docs/CLUSTER_REVAMP.md
Normal file
1717
docs/CLUSTER_REVAMP.md
Normal file
File diff suppressed because it is too large
Load Diff
288
docs/DIFF_CONFIGS.md
Normal file
288
docs/DIFF_CONFIGS.md
Normal file
@@ -0,0 +1,288 @@
|
|||||||
|
# Configuration Diff Tool
|
||||||
|
|
||||||
|
Tool to compare all NixOS host configurations between current working tree and HEAD commit.
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
|
||||||
|
Before committing changes (especially refactors), verify that you haven't accidentally broken existing host configurations. This tool:
|
||||||
|
- Builds all host configurations in current state (with uncommitted changes)
|
||||||
|
- Builds all host configurations at HEAD (last commit)
|
||||||
|
- Uses `nvd` to show readable diffs for each host
|
||||||
|
- Highlights which hosts changed and which didn't
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
The script requires `nvd` to be in PATH. Use either:
|
||||||
|
|
||||||
|
**Option 1: direnv (recommended)**
|
||||||
|
```bash
|
||||||
|
# Allow direnv in the repository (one-time setup)
|
||||||
|
direnv allow
|
||||||
|
|
||||||
|
# direnv will automatically load the dev shell when you cd into the directory
|
||||||
|
cd /home/ppetru/projects/alo-cluster
|
||||||
|
# nvd is now in PATH
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option 2: nix develop**
|
||||||
|
```bash
|
||||||
|
# Enter dev shell manually
|
||||||
|
nix develop
|
||||||
|
|
||||||
|
# Now run the script
|
||||||
|
./scripts/diff-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Compare all hosts (summary)
|
||||||
|
./scripts/diff-configs.sh
|
||||||
|
|
||||||
|
# Compare with detailed path listing
|
||||||
|
./scripts/diff-configs.sh -v c1
|
||||||
|
|
||||||
|
# Compare with content diffs of changed files (deep mode)
|
||||||
|
./scripts/diff-configs.sh --deep c1
|
||||||
|
|
||||||
|
# Compare only x86_64 hosts (avoid slow ARM cross-compilation)
|
||||||
|
./scripts/diff-configs.sh c1 c2 c3 zippy chilly sparky
|
||||||
|
|
||||||
|
# Verbose mode with multiple hosts
|
||||||
|
./scripts/diff-configs.sh --verbose c1 c2 c3
|
||||||
|
|
||||||
|
# Via flake app
|
||||||
|
nix run .#diff-configs
|
||||||
|
|
||||||
|
# Show help
|
||||||
|
./scripts/diff-configs.sh --help
|
||||||
|
```
|
||||||
|
|
||||||
|
### Typical Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Make changes to configurations
|
||||||
|
vim common/impermanence.nix
|
||||||
|
|
||||||
|
# 2. Stage changes (required for flake to see them)
|
||||||
|
git add common/impermanence.nix
|
||||||
|
|
||||||
|
# 3. Check what would change if you committed now
|
||||||
|
# For quick feedback, compare only x86_64 hosts first:
|
||||||
|
./scripts/diff-configs.sh c1 c2 c3 zippy chilly sparky
|
||||||
|
|
||||||
|
# 4. Review output, make adjustments if needed
|
||||||
|
|
||||||
|
# 5. If changes look good and affect ARM hosts, check those too:
|
||||||
|
./scripts/diff-configs.sh stinky alo-cloud-1
|
||||||
|
|
||||||
|
# 6. Commit when satisfied
|
||||||
|
git commit -m "Refactor impermanence config"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Explanation
|
||||||
|
|
||||||
|
### No Changes
|
||||||
|
```
|
||||||
|
━━━ c1 ━━━
|
||||||
|
Building current... done
|
||||||
|
Building HEAD... done
|
||||||
|
✓ No changes
|
||||||
|
```
|
||||||
|
This host's configuration is identical between current and HEAD.
|
||||||
|
|
||||||
|
### Changes Detected
|
||||||
|
```
|
||||||
|
━━━ stinky ━━━
|
||||||
|
Building current... done
|
||||||
|
Building HEAD... done
|
||||||
|
⚠ Configuration changed
|
||||||
|
|
||||||
|
<<< /nix/store/abc-nixos-system-stinky-25.05 (HEAD)
|
||||||
|
>>> /nix/store/xyz-nixos-system-stinky-25.05 (current)
|
||||||
|
|
||||||
|
Version changes:
|
||||||
|
[C] octoprint: 1.9.3 -> 1.10.0
|
||||||
|
[A+] libcamera: ∅ -> 0.1.0
|
||||||
|
Closure size: 1500 -> 1520 (5 paths added, 2 paths removed, +3, +15.2 MB)
|
||||||
|
```
|
||||||
|
|
||||||
|
Legend:
|
||||||
|
- `[C]` - Changed package version
|
||||||
|
- `[A+]` - Added package
|
||||||
|
- `[R-]` - Removed package
|
||||||
|
- `[U.]` - Updated (same version, rebuilt)
|
||||||
|
|
||||||
|
### Verbose Mode (--verbose)
|
||||||
|
|
||||||
|
With `-v` or `--verbose`, also shows the actual store paths that changed:
|
||||||
|
|
||||||
|
```
|
||||||
|
━━━ c1 ━━━
|
||||||
|
Building current... done
|
||||||
|
Building HEAD... done
|
||||||
|
⚠ Configuration changed
|
||||||
|
|
||||||
|
[nvd summary as above]
|
||||||
|
|
||||||
|
Changed store paths:
|
||||||
|
Removed (17 paths):
|
||||||
|
- config.fish
|
||||||
|
- system-units
|
||||||
|
- home-manager-generation
|
||||||
|
- etc-fuse.conf
|
||||||
|
... and 13 more
|
||||||
|
|
||||||
|
Added (17 paths):
|
||||||
|
- config.fish
|
||||||
|
- system-units
|
||||||
|
- home-manager-generation
|
||||||
|
- etc-fuse.conf
|
||||||
|
... and 13 more
|
||||||
|
```
|
||||||
|
|
||||||
|
This is useful when nvd shows "No version changes" but paths still changed (e.g., refactors that rebuild config files).
|
||||||
|
|
||||||
|
### Deep Mode (--deep)
|
||||||
|
|
||||||
|
With `-d` or `--deep`, shows actual content diffs of changed files within store paths (implies verbose):
|
||||||
|
|
||||||
|
```
|
||||||
|
━━━ c1 ━━━
|
||||||
|
Building current... done
|
||||||
|
Building HEAD... done
|
||||||
|
⚠ Configuration changed
|
||||||
|
|
||||||
|
[nvd summary and path listing as above]
|
||||||
|
|
||||||
|
Content diffs of changed files:
|
||||||
|
|
||||||
|
▸ etc-fuse.conf
|
||||||
|
@@ -1,2 +1,2 @@
|
||||||
|
-user_allow_other
|
||||||
|
+#user_allow_other
|
||||||
|
mount_max = 1000
|
||||||
|
|
||||||
|
▸ nixos-system-c1-25.05
|
||||||
|
activate:
|
||||||
|
@@ -108,7 +108,7 @@
|
||||||
|
echo "setting up /etc..."
|
||||||
|
-/nix/store/...-perl/bin/perl /nix/store/...-setup-etc.pl /nix/store/abc-etc/etc
|
||||||
|
+/nix/store/...-perl/bin/perl /nix/store/...-setup-etc.pl /nix/store/xyz-etc/etc
|
||||||
|
|
||||||
|
▸ unit-dbus.service
|
||||||
|
dbus.service:
|
||||||
|
@@ -1,5 +1,5 @@
|
||||||
|
[Service]
|
||||||
|
+Environment="LD_LIBRARY_PATH=/nix/store/.../systemd/lib"
|
||||||
|
Environment="LOCALE_ARCHIVE=..."
|
||||||
|
```
|
||||||
|
|
||||||
|
**What it shows**:
|
||||||
|
- Matches changed paths by basename (e.g., both have "config.fish")
|
||||||
|
- Diffs important files: activate scripts, etc/*, *.conf, *.fish, *.service, *.nix
|
||||||
|
- Shows unified diff format (lines added/removed)
|
||||||
|
- Limits to first 50 lines per file
|
||||||
|
|
||||||
|
**When to use**:
|
||||||
|
- When you need to know **what exactly changed** in config files
|
||||||
|
- Debugging unexpected configuration changes
|
||||||
|
- Reviewing refactors that don't change package versions
|
||||||
|
- Understanding why a host rebuilt despite "No version changes"
|
||||||
|
|
||||||
|
### Build Failures
|
||||||
|
```
|
||||||
|
━━━ broken-host ━━━
|
||||||
|
Building current... FAILED
|
||||||
|
Error: attribute 'foo' missing
|
||||||
|
```
|
||||||
|
If a host fails to build, the error is shown and the script continues with other hosts.
|
||||||
|
|
||||||
|
## How It Works
|
||||||
|
|
||||||
|
1. **Discovers hosts**: Queries `deploy.nodes` from flake to get all configured hosts
|
||||||
|
2. **Creates worktree**: Uses `git worktree` to check out HEAD in a temporary directory
|
||||||
|
3. **Builds configurations**: Builds `config.system.build.toplevel` for each host in both locations
|
||||||
|
4. **Compares with nvd**: Runs `nvd diff` to show package-level changes
|
||||||
|
5. **Cleans up**: Removes temporary worktree automatically
|
||||||
|
|
||||||
|
## Important Notes
|
||||||
|
|
||||||
|
### Git Staging Required
|
||||||
|
|
||||||
|
Flakes only evaluate files that are tracked by git. To make changes visible:
|
||||||
|
```bash
|
||||||
|
# Stage new files
|
||||||
|
git add new-file.nix
|
||||||
|
|
||||||
|
# Stage changes to existing files
|
||||||
|
git add modified-file.nix
|
||||||
|
|
||||||
|
# Or stage everything
|
||||||
|
git add .
|
||||||
|
```
|
||||||
|
|
||||||
|
Unstaged changes to tracked files **are** visible (flake uses working tree content).
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
|
||||||
|
- First run may be slow (building all configurations)
|
||||||
|
- Subsequent runs benefit from Nix evaluation cache
|
||||||
|
- Typical runtime: 1-5 minutes depending on changes
|
||||||
|
- **ARM cross-compilation is slow**: Use host filtering to avoid building ARM hosts when not needed
|
||||||
|
- Example: `./scripts/diff-configs.sh c1 c2 c3` (x86_64 only, fast)
|
||||||
|
- vs `./scripts/diff-configs.sh` (includes stinky/alo-cloud-1, slow)
|
||||||
|
|
||||||
|
### When to Use
|
||||||
|
|
||||||
|
**Good use cases**:
|
||||||
|
- Refactoring shared modules (like impermanence)
|
||||||
|
- Updating common configurations
|
||||||
|
- Before committing significant changes
|
||||||
|
- Verifying deploy target consistency
|
||||||
|
|
||||||
|
**Not needed for**:
|
||||||
|
- Adding a single new host
|
||||||
|
- Trivial one-host changes
|
||||||
|
- Documentation updates
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### "Not in a git repository"
|
||||||
|
```bash
|
||||||
|
cd /home/ppetru/projects/alo-cluster
|
||||||
|
./scripts/diff-configs.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
### "No changes detected"
|
||||||
|
All changes are already committed. Stage some changes first:
|
||||||
|
```bash
|
||||||
|
git add .
|
||||||
|
```
|
||||||
|
|
||||||
|
### Build failures for all hosts
|
||||||
|
Check flake syntax:
|
||||||
|
```bash
|
||||||
|
nix flake check
|
||||||
|
```
|
||||||
|
|
||||||
|
### nvd not found
|
||||||
|
Install nvd:
|
||||||
|
```bash
|
||||||
|
nix profile install nixpkgs#nvd
|
||||||
|
```
|
||||||
|
(Already included in workstation-node.nix packages)
|
||||||
|
|
||||||
|
## Related Tools
|
||||||
|
|
||||||
|
- `nvd` - Package diff tool (used internally)
|
||||||
|
- `nix diff-closures` - Low-level closure diff
|
||||||
|
- `nix store diff-closures` - Alternative diff command
|
||||||
|
- `deploy-rs` - Actual deployment tool
|
||||||
|
|
||||||
|
## See Also
|
||||||
|
|
||||||
|
- `common/global/show-changelog.nix` - Shows changes during system activation
|
||||||
|
- `docs/RASPBERRY_PI_SD_IMAGE.md` - SD image building process
|
||||||
160
docs/MIGRATION_TODO.md
Normal file
160
docs/MIGRATION_TODO.md
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
# Cluster Revamp Migration TODO
|
||||||
|
|
||||||
|
Track migration progress from GlusterFS to NFS-based architecture.
|
||||||
|
See [CLUSTER_REVAMP.md](./CLUSTER_REVAMP.md) for detailed procedures.
|
||||||
|
|
||||||
|
## Phase 0: Preparation
|
||||||
|
- [x] Review cluster revamp plan
|
||||||
|
- [ ] Backup everything (kopia snapshots current)
|
||||||
|
- [ ] Document current state (nomad jobs, consul services)
|
||||||
|
|
||||||
|
## Phase 1: Convert fractal to NixOS (DEFERRED - do after GlusterFS migration)
|
||||||
|
- [ ] Document fractal's current ZFS layout
|
||||||
|
- [ ] Install NixOS on fractal
|
||||||
|
- [ ] Import ZFS pools (double1, double2, double3)
|
||||||
|
- [ ] Create fractal NixOS configuration
|
||||||
|
- [ ] Configure Samba server for media/shared/homes
|
||||||
|
- [ ] Configure Kopia backup server
|
||||||
|
- [ ] Deploy and verify fractal base config
|
||||||
|
- [ ] Join fractal to cluster (5-server quorum)
|
||||||
|
- [ ] Update all cluster configs for 5-server quorum
|
||||||
|
- [ ] Verify fractal fully operational
|
||||||
|
|
||||||
|
## Phase 2: Setup zippy storage layer
|
||||||
|
- [x] Create btrfs subvolume `/persist/services` on zippy
|
||||||
|
- [x] Configure NFS server on zippy (nfs-services-server.nix)
|
||||||
|
- [x] Configure Consul service registration for NFS
|
||||||
|
- [x] Setup btrfs replication to c1 (incremental, 5min intervals)
|
||||||
|
- [x] Fix replication script to handle SSH command restrictions
|
||||||
|
- [x] Setup standby storage on c1 (`/persist/services-standby`)
|
||||||
|
- [x] Configure c1 as standby (nfs-services-standby.nix)
|
||||||
|
- [x] Configure Kopia to exclude replication snapshots
|
||||||
|
- [x] Deploy and verify NFS server on zippy
|
||||||
|
- [x] Verify replication working to c1
|
||||||
|
- [ ] Setup standby storage on c2 (if desired)
|
||||||
|
- [ ] Configure replication to c2 (if desired)
|
||||||
|
|
||||||
|
## Phase 3: Migrate from GlusterFS to NFS
|
||||||
|
- [x] Update all nodes to mount NFS at `/data/services`
|
||||||
|
- [x] Deploy updated configs (NFS client on all nodes)
|
||||||
|
- [x] Stop all Nomad jobs temporarily
|
||||||
|
- [x] Copy data from GlusterFS to zippy NFS
|
||||||
|
- [x] Copy `/data/compute/appdata/*` → `/persist/services/appdata/`
|
||||||
|
- [x] Copy `/data/compute/config/*` → `/persist/services/config/`
|
||||||
|
- [x] Copy `/data/sync/wordpress` → `/persist/services/appdata/wordpress`
|
||||||
|
- [x] Verify data integrity
|
||||||
|
- [x] Verify NFS mounts working on all nodes
|
||||||
|
- [x] Stop GlusterFS volume
|
||||||
|
- [x] Delete GlusterFS volume
|
||||||
|
- [x] Remove GlusterFS from NixOS configs
|
||||||
|
- [x] Remove syncthing wordpress sync configuration (no longer used)
|
||||||
|
|
||||||
|
## Phase 4: Update and redeploy Nomad jobs
|
||||||
|
|
||||||
|
### Core Infrastructure (CRITICAL)
|
||||||
|
- [x] mysql.hcl - moved to zippy, using `/data/services`
|
||||||
|
- [x] postgres.hcl - migrated to `/data/services`
|
||||||
|
- [x] redis.hcl - migrated to `/data/services`
|
||||||
|
- [x] traefik.hcl - migrated to `/data/services`
|
||||||
|
- [x] authentik.hcl - stateless, no changes needed
|
||||||
|
|
||||||
|
### Monitoring Stack (HIGH)
|
||||||
|
- [x] prometheus.hcl - migrated to `/data/services`
|
||||||
|
- [x] grafana.hcl - migrated to `/data/services` (2025-10-23)
|
||||||
|
- [x] loki.hcl - migrated to `/data/services`
|
||||||
|
- [x] vector.hcl - removed glusterfs log collection (2025-10-23)
|
||||||
|
|
||||||
|
### Databases (HIGH)
|
||||||
|
- [x] clickhouse.hcl - migrated to `/data/services`
|
||||||
|
- [x] unifi.hcl - migrated to `/data/services` (includes mongodb)
|
||||||
|
|
||||||
|
### Web Applications (HIGH-MEDIUM)
|
||||||
|
- [x] wordpress.hcl - migrated to `/data/services`
|
||||||
|
- [x] gitea.hcl - migrated to `/data/services` (2025-10-23)
|
||||||
|
- [x] wiki.hcl - migrated to `/data/services` (2025-10-23)
|
||||||
|
- [x] plausible.hcl - stateless, no changes needed
|
||||||
|
|
||||||
|
### Web Applications (LOW, may be deprecated)
|
||||||
|
- [x] vikunja.hcl - migrated to `/data/services` (2025-10-23, not running)
|
||||||
|
|
||||||
|
### Media Stack (MEDIUM)
|
||||||
|
- [x] media.hcl - migrated to `/data/services`
|
||||||
|
|
||||||
|
### Utility Services (MEDIUM-LOW)
|
||||||
|
- [x] evcc.hcl - migrated to `/data/services`
|
||||||
|
- [x] weewx.hcl - migrated to `/data/services` (2025-10-23)
|
||||||
|
- [x] code-server.hcl - migrated to `/data/services`
|
||||||
|
- [x] beancount.hcl - migrated to `/data/services`
|
||||||
|
- [x] adminer.hcl - stateless, no changes needed
|
||||||
|
- [x] maps.hcl - migrated to `/data/services`
|
||||||
|
- [x] netbox.hcl - migrated to `/data/services`
|
||||||
|
- [x] farmos.hcl - migrated to `/data/services` (2025-10-23)
|
||||||
|
- [x] urbit.hcl - migrated to `/data/services`
|
||||||
|
- [x] webodm.hcl - migrated to `/data/services` (2025-10-23, not running)
|
||||||
|
- [x] velutrack.hcl - migrated to `/data/services`
|
||||||
|
- [x] resol-gateway.hcl - migrated to `/data/services` (2025-10-23)
|
||||||
|
- [x] igsync.hcl - migrated to `/data/services` (2025-10-23)
|
||||||
|
- [x] jupyter.hcl - migrated to `/data/services` (2025-10-23, not running)
|
||||||
|
- [x] whoami.hcl - stateless test service, no changes needed
|
||||||
|
|
||||||
|
### Backup Jobs (HIGH)
|
||||||
|
- [x] mysql-backup - moved to zippy, verified
|
||||||
|
- [x] postgres-backup.hcl - migrated to `/data/services`
|
||||||
|
|
||||||
|
### Host Volume Definitions (CRITICAL)
|
||||||
|
- [x] common/nomad.nix - consolidated `appdata` and `code` volumes into single `services` volume (2025-10-23)
|
||||||
|
|
||||||
|
### Verification
|
||||||
|
- [ ] All services healthy in Nomad
|
||||||
|
- [ ] All services registered in Consul
|
||||||
|
- [ ] Traefik routes working
|
||||||
|
- [ ] Database jobs running on zippy (verify via nomad alloc status)
|
||||||
|
- [ ] Media jobs running on fractal (verify via nomad alloc status)
|
||||||
|
|
||||||
|
## Phase 5: Convert sunny to NixOS (OPTIONAL - can defer)
|
||||||
|
- [ ] Document current sunny setup (ethereum containers/VMs)
|
||||||
|
- [ ] Backup ethereum data
|
||||||
|
- [ ] Install NixOS on sunny
|
||||||
|
- [ ] Restore ethereum data to `/persist/ethereum`
|
||||||
|
- [ ] Create sunny container-based config (besu, lighthouse, rocketpool)
|
||||||
|
- [ ] Deploy and verify ethereum stack
|
||||||
|
- [ ] Monitor sync status and validation
|
||||||
|
|
||||||
|
## Phase 6: Verification and cleanup
|
||||||
|
- [ ] Test NFS failover procedure (zippy → c1)
|
||||||
|
- [ ] Verify backups include `/persist/services` data
|
||||||
|
- [ ] Verify backups exclude replication snapshots
|
||||||
|
- [ ] Update documentation (README.md, architecture diagrams)
|
||||||
|
- [x] Clean up old GlusterFS data (only after everything verified!)
|
||||||
|
- [x] Remove old glusterfs directories from all nodes
|
||||||
|
|
||||||
|
## Post-Migration Checklist
|
||||||
|
- [ ] All 5 servers in quorum (consul members)
|
||||||
|
- [ ] NFS mounts working on all nodes
|
||||||
|
- [ ] Btrfs replication running (check systemd timers on zippy)
|
||||||
|
- [ ] Critical services up (mysql, postgres, redis, traefik, authentik)
|
||||||
|
- [ ] Monitoring working (prometheus, grafana, loki)
|
||||||
|
- [ ] Media stack on fractal
|
||||||
|
- [ ] Database jobs on zippy
|
||||||
|
- [ ] Consul DNS working (dig @localhost -p 8600 data-services.service.consul)
|
||||||
|
- [ ] Backups running (kopia snapshots include /persist/services)
|
||||||
|
- [ ] GlusterFS removed (no processes, volumes deleted)
|
||||||
|
- [ ] Documentation updated
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Last updated**: 2025-10-25
|
||||||
|
**Current phase**: Phase 3 & 4 complete! GlusterFS removed, all services on NFS
|
||||||
|
**Note**: Phase 1 (fractal NixOS conversion) was deferred until the GlusterFS migration finished; with Phases 3 & 4 now complete, it can proceed
|
||||||
|
|
||||||
|
## Migration Summary
|
||||||
|
|
||||||
|
**All services migrated to `/data/services` (30 total):**
|
||||||
|
mysql, mysql-backup, postgres, postgres-backup, redis, clickhouse, prometheus, grafana, loki, vector, unifi, wordpress, gitea, wiki, traefik, evcc, weewx, netbox, farmos, webodm, jupyter, vikunja, urbit, code-server, beancount, velutrack, maps, media, resol-gateway, igsync
|
||||||
|
|
||||||
|
**Stateless/no changes needed (4 services):**
|
||||||
|
authentik, adminer, plausible, whoami
|
||||||
|
|
||||||
|
**Configuration changes:**
|
||||||
|
- common/nomad.nix: consolidated `appdata` and `code` volumes into single `services` volume
|
||||||
|
- vector.hcl: removed glusterfs log collection
|
||||||
438
docs/NFS_FAILOVER.md
Normal file
438
docs/NFS_FAILOVER.md
Normal file
@@ -0,0 +1,438 @@
|
|||||||
|
# NFS Services Failover Procedures
|
||||||
|
|
||||||
|
This document describes how to fail over the `/data/services` NFS server between hosts and how to fail back.
|
||||||
|
|
||||||
|
## Architecture Overview
|
||||||
|
|
||||||
|
- **Primary NFS Server**: Typically `zippy`
|
||||||
|
- Exports `/persist/services` via NFS
|
||||||
|
- Has local bind mount: `/data/services` → `/persist/services` (same path as clients)
|
||||||
|
- Registers `data-services.service.consul` in Consul
|
||||||
|
- Sets Nomad node meta: `storage_role = "primary"`
|
||||||
|
- Replicates snapshots to standbys every 5 minutes via btrfs send
|
||||||
|
- **Safety check**: Refuses to start if another NFS server is already active in Consul
|
||||||
|
|
||||||
|
- **Standby**: Typically `c1`
|
||||||
|
- Receives snapshots at `/persist/services-standby/services@<timestamp>`
|
||||||
|
- Can be promoted to NFS server during failover
|
||||||
|
- No special Nomad node meta (not primary)
|
||||||
|
|
||||||
|
- **Clients**: All cluster nodes (c1, c2, c3, zippy)
|
||||||
|
- Mount `/data/services` from `data-services.service.consul:/persist/services`
|
||||||
|
- Automatically connect to whoever is registered in Consul
|
||||||
|
|
||||||
|
### Nomad Job Constraints
|
||||||
|
|
||||||
|
Jobs that need to run on the primary storage node should use:
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
constraint {
|
||||||
|
attribute = "${meta.storage_role}"
|
||||||
|
value = "primary"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This is useful for:
|
||||||
|
- Database jobs (mysql, postgres, redis) that benefit from local storage
|
||||||
|
- Jobs that need guaranteed fast disk I/O
|
||||||
|
|
||||||
|
During failover, the `storage_role = "primary"` meta attribute moves to the new NFS server, and Nomad automatically reschedules constrained jobs to the new primary.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Standby has been receiving snapshots (check: `ls /persist/services-standby/services@*`)
|
||||||
|
- Last successful replication happened within the past 5-10 minutes
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Failover: Promoting Standby to Primary
|
||||||
|
|
||||||
|
**Scenario**: `zippy` is down and you need to promote `c1` to be the NFS server.
|
||||||
|
|
||||||
|
### Step 1: Choose Latest Snapshot
|
||||||
|
|
||||||
|
On the standby (c1):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh c1
|
||||||
|
sudo ls -lt /persist/services-standby/services@* | head -5
|
||||||
|
```
|
||||||
|
|
||||||
|
Find the most recent snapshot. Note the timestamp to estimate data loss (typically < 5 minutes).
|
||||||
|
|
||||||
|
### Step 2: Promote Snapshot to Read-Write Subvolume
|
||||||
|
|
||||||
|
On c1:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Find the latest snapshot
|
||||||
|
LATEST=$(sudo ls -t /persist/services-standby/services@* | head -1)
|
||||||
|
|
||||||
|
# Create writable subvolume from snapshot
|
||||||
|
sudo btrfs subvolume snapshot "$LATEST" /persist/services
|
||||||
|
|
||||||
|
# Verify
|
||||||
|
ls -la /persist/services
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Update NixOS Configuration
|
||||||
|
|
||||||
|
Edit your configuration to swap the NFS server role:
|
||||||
|
|
||||||
|
**In `hosts/c1/default.nix`**:
|
||||||
|
```nix
|
||||||
|
imports = [
|
||||||
|
# ... existing imports ...
|
||||||
|
# ../../common/nfs-services-standby.nix # REMOVE THIS
|
||||||
|
../../common/nfs-services-server.nix # ADD THIS
|
||||||
|
];
|
||||||
|
|
||||||
|
# Add standbys if desired (optional - can leave empty during emergency)
|
||||||
|
nfsServicesServer.standbys = []; # Or ["c2"] to add a new standby
|
||||||
|
```
|
||||||
|
|
||||||
|
**Optional: Prepare zippy config for when it comes back**:
|
||||||
|
|
||||||
|
In `hosts/zippy/default.nix` (can do this later too):
|
||||||
|
```nix
|
||||||
|
imports = [
|
||||||
|
# ... existing imports ...
|
||||||
|
# ../../common/nfs-services-server.nix # REMOVE THIS
|
||||||
|
../../common/nfs-services-standby.nix # ADD THIS
|
||||||
|
];
|
||||||
|
|
||||||
|
# Add the replication key from c1 (get it from c1:/persist/root/.ssh/btrfs-replication.pub)
|
||||||
|
nfsServicesStandby.replicationKeys = [
|
||||||
|
"ssh-ed25519 AAAA... root@c1-replication"
|
||||||
|
];
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Deploy Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# From your workstation
|
||||||
|
deploy -s '.#c1'
|
||||||
|
|
||||||
|
# If zippy is still down, updating its config will fail, but that's okay
|
||||||
|
# You can update it later when it comes back
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Verify NFS Server is Running
|
||||||
|
|
||||||
|
On c1:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo systemctl status nfs-server
|
||||||
|
sudo showmount -e localhost
|
||||||
|
dig @localhost -p 8600 data-services.service.consul # Should show c1's IP
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 6: Verify Clients Can Access
|
||||||
|
|
||||||
|
From any node:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
df -h | grep services
|
||||||
|
ls /data/services
|
||||||
|
```
|
||||||
|
|
||||||
|
The mount should automatically reconnect via Consul DNS.
|
||||||
|
|
||||||
|
### Step 7: Check Nomad Jobs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
nomad job status mysql
|
||||||
|
nomad job status postgres
|
||||||
|
# Verify critical services are healthy
|
||||||
|
|
||||||
|
# Jobs constrained to ${meta.storage_role} = "primary" will automatically
|
||||||
|
# reschedule to c1 once it's deployed with the NFS server module
|
||||||
|
```
|
||||||
|
|
||||||
|
**Recovery Time Objective (RTO)**: ~10-15 minutes
|
||||||
|
**Recovery Point Objective (RPO)**: Last replication interval (5 minutes max)
|
||||||
|
|
||||||
|
**Note**: Jobs with the `storage_role = "primary"` constraint will automatically move to c1 because it now has that node meta attribute. No job spec changes needed!
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## What Happens When zippy Comes Back?
|
||||||
|
|
||||||
|
**IMPORTANT**: If zippy reboots while still configured as NFS server, it will **refuse to start** the NFS service because it detects c1 is already active in Consul.
|
||||||
|
|
||||||
|
You'll see this error in `journalctl -u nfs-server`:
|
||||||
|
|
||||||
|
```
|
||||||
|
ERROR: Another NFS server is already active at 192.168.1.X
|
||||||
|
This host (192.168.1.2) is configured as NFS server but should be standby.
|
||||||
|
To fix:
|
||||||
|
1. If this is intentional (failback), first demote the other server
|
||||||
|
2. Update this host's config to use nfs-services-standby.nix instead
|
||||||
|
3. Sync data from active server before promoting this host
|
||||||
|
```
|
||||||
|
|
||||||
|
This is a **safety feature** to prevent split-brain and data corruption.
|
||||||
|
|
||||||
|
### Options when zippy comes back:
|
||||||
|
|
||||||
|
**Option A: Keep c1 as primary** (zippy becomes standby)
|
||||||
|
1. Update zippy's config to use `nfs-services-standby.nix`
|
||||||
|
2. Deploy to zippy
|
||||||
|
3. c1 will start replicating to zippy
|
||||||
|
|
||||||
|
**Option B: Fail back to zippy as primary**
|
||||||
|
Follow the "Failing Back to Original Primary" procedure below.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Failing Back to Original Primary
|
||||||
|
|
||||||
|
**Scenario**: `zippy` is repaired and you want to move the NFS server role back from `c1` to `zippy`.
|
||||||
|
|
||||||
|
### Step 1: Sync Latest Data from c1 to zippy
|
||||||
|
|
||||||
|
On c1 (current primary):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create readonly snapshot of current state
|
||||||
|
sudo btrfs subvolume snapshot -r /persist/services /persist/services@failback-$(date +%Y%m%d-%H%M%S)
|
||||||
|
|
||||||
|
# Find the snapshot
|
||||||
|
FAILBACK=$(sudo ls -t /persist/services@failback-* | head -1)
|
||||||
|
|
||||||
|
# Send to zippy (use root SSH key if available, or generate temporary key)
|
||||||
|
sudo btrfs send "$FAILBACK" | ssh root@zippy "btrfs receive /persist/"
|
||||||
|
```
|
||||||
|
|
||||||
|
On zippy:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Verify snapshot arrived
|
||||||
|
ls -la /persist/services@failback-*
|
||||||
|
|
||||||
|
# Create writable subvolume from the snapshot
|
||||||
|
FAILBACK=$(ls -t /persist/services@failback-* | head -1)
|
||||||
|
sudo btrfs subvolume snapshot "$FAILBACK" /persist/services
|
||||||
|
|
||||||
|
# Verify
|
||||||
|
ls -la /persist/services
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Update NixOS Configuration
|
||||||
|
|
||||||
|
Swap the roles back:
|
||||||
|
|
||||||
|
**In `hosts/zippy/default.nix`**:
|
||||||
|
```nix
|
||||||
|
imports = [
|
||||||
|
# ... existing imports ...
|
||||||
|
# ../../common/nfs-services-standby.nix # REMOVE THIS
|
||||||
|
../../common/nfs-services-server.nix # ADD THIS
|
||||||
|
];
|
||||||
|
|
||||||
|
nfsServicesServer.standbys = ["c1"];
|
||||||
|
```
|
||||||
|
|
||||||
|
**In `hosts/c1/default.nix`**:
|
||||||
|
```nix
|
||||||
|
imports = [
|
||||||
|
# ... existing imports ...
|
||||||
|
# ../../common/nfs-services-server.nix # REMOVE THIS
|
||||||
|
../../common/nfs-services-standby.nix # ADD THIS
|
||||||
|
];
|
||||||
|
|
||||||
|
nfsServicesStandby.replicationKeys = [
|
||||||
|
"ssh-ed25519 AAAA... root@zippy-replication" # Get from zippy:/persist/root/.ssh/btrfs-replication.pub
|
||||||
|
];
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Deploy Configurations
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# IMPORTANT: Deploy c1 FIRST to demote it
|
||||||
|
deploy -s '.#c1'
|
||||||
|
|
||||||
|
# Wait for c1 to stop NFS server
|
||||||
|
ssh c1 sudo systemctl status nfs-server # Should be inactive
|
||||||
|
|
||||||
|
# Then deploy zippy to promote it
|
||||||
|
deploy -s '.#zippy'
|
||||||
|
```
|
||||||
|
|
||||||
|
The order matters! If you deploy zippy first, it will see c1 is still active and refuse to start.
|
||||||
|
|
||||||
|
### Step 4: Verify Failback
|
||||||
|
|
||||||
|
Check Consul DNS points to zippy:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dig @c1 -p 8600 data-services.service.consul # Should show zippy's IP
|
||||||
|
```
|
||||||
|
|
||||||
|
Check clients are mounting from zippy:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
for host in c1 c2 c3; do
|
||||||
|
ssh $host "df -h | grep services"
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 5: Clean Up Temporary Snapshots
|
||||||
|
|
||||||
|
On c1:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Remove the failback snapshot and the promoted subvolume
|
||||||
|
sudo btrfs subvolume delete /persist/services@failback-*
|
||||||
|
sudo btrfs subvolume delete /persist/services
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Adding a New Standby
|
||||||
|
|
||||||
|
**Scenario**: You want to add `c2` as an additional standby.
|
||||||
|
|
||||||
|
### Step 1: Create Standby Subvolume on c2
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh c2
|
||||||
|
sudo btrfs subvolume create /persist/services-standby
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Update c2 Configuration
|
||||||
|
|
||||||
|
**In `hosts/c2/default.nix`**:
|
||||||
|
```nix
|
||||||
|
imports = [
|
||||||
|
# ... existing imports ...
|
||||||
|
../../common/nfs-services-standby.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
nfsServicesStandby.replicationKeys = [
|
||||||
|
"ssh-ed25519 AAAA... root@zippy-replication" # Get from current NFS server
|
||||||
|
];
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Update NFS Server Configuration
|
||||||
|
|
||||||
|
On the current NFS server (e.g., zippy), update the standbys list:
|
||||||
|
|
||||||
|
**In `hosts/zippy/default.nix`**:
|
||||||
|
```nix
|
||||||
|
nfsServicesServer.standbys = ["c1" "c2"]; # Added c2
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Deploy
|
||||||
|
|
||||||
|
```bash
|
||||||
|
deploy -s '.#c2'
|
||||||
|
deploy -s '.#zippy'
|
||||||
|
```
|
||||||
|
|
||||||
|
The next replication cycle (within 5 minutes) will do a full send to c2, then switch to incremental.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Replication Failed
|
||||||
|
|
||||||
|
Check the replication service logs:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On NFS server
|
||||||
|
sudo journalctl -u replicate-services-to-c1 -f
|
||||||
|
```
|
||||||
|
|
||||||
|
Common issues:
|
||||||
|
- SSH key not found → Run key generation step (see stateful-commands.txt)
|
||||||
|
- Permission denied → Check authorized_keys on standby
|
||||||
|
- Snapshot already exists → Old snapshot with same timestamp, wait for next cycle
|
||||||
|
|
||||||
|
### Clients Can't Mount
|
||||||
|
|
||||||
|
Check Consul:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dig @localhost -p 8600 data-services.service.consul
|
||||||
|
consul catalog services | grep data-services
|
||||||
|
```
|
||||||
|
|
||||||
|
If Consul isn't resolving:
|
||||||
|
- NFS server might not have registered → Check `sudo systemctl status nfs-server`
|
||||||
|
- Consul agent might be down → Check `sudo systemctl status consul`
|
||||||
|
|
||||||
|
### Mount is Stale
|
||||||
|
|
||||||
|
Force remount:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo systemctl restart data-services.mount
|
||||||
|
```
|
||||||
|
|
||||||
|
Or unmount and let automount handle it:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo umount /data/services
|
||||||
|
ls /data/services # Triggers automount
|
||||||
|
```
|
||||||
|
|
||||||
|
### Split-Brain Prevention: NFS Server Won't Start
|
||||||
|
|
||||||
|
If you see:
|
||||||
|
```
|
||||||
|
ERROR: Another NFS server is already active at 192.168.1.X
|
||||||
|
```
|
||||||
|
|
||||||
|
This is **intentional** - the safety check is working! You have two options:
|
||||||
|
|
||||||
|
1. **Keep the other server as primary**: Update this host's config to be a standby instead
|
||||||
|
2. **Fail back to this host**: First demote the other server, sync data, then deploy both hosts in correct order
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Monitoring
|
||||||
|
|
||||||
|
### Check Replication Status
|
||||||
|
|
||||||
|
On NFS server:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List recent snapshots
|
||||||
|
ls -lt /persist/services@* | head
|
||||||
|
|
||||||
|
# Check last replication run
|
||||||
|
sudo systemctl status replicate-services-to-c1
|
||||||
|
|
||||||
|
# Check replication logs
|
||||||
|
sudo journalctl -u replicate-services-to-c1 --since "1 hour ago"
|
||||||
|
```
|
||||||
|
|
||||||
|
On standby:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List received snapshots
|
||||||
|
ls -lt /persist/services-standby/services@* | head
|
||||||
|
|
||||||
|
# Check how old the latest snapshot is
|
||||||
|
stat /persist/services-standby/services@* | grep Modify | head -1
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verify NFS Exports
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo showmount -e localhost
|
||||||
|
```
|
||||||
|
|
||||||
|
Should show:
|
||||||
|
```
|
||||||
|
/persist/services 192.168.1.0/24
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Consul Registration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
consul catalog services | grep data-services
|
||||||
|
dig @localhost -p 8600 data-services.service.consul
|
||||||
|
```
|
||||||
98
docs/RASPBERRY_PI_SD_IMAGE.md
Normal file
98
docs/RASPBERRY_PI_SD_IMAGE.md
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
# Raspberry Pi SD Image Building and Deployment
|
||||||
|
|
||||||
|
Guide for building and deploying NixOS SD card images for Raspberry Pi hosts (e.g., stinky).
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Raspberry Pi hosts use a different deployment strategy than regular NixOS hosts:
|
||||||
|
- **First deployment**: Build and flash an SD card image
|
||||||
|
- **Subsequent updates**: Use `deploy-rs` like other hosts
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Storage Layout
|
||||||
|
|
||||||
|
**Partition structure** (automatically created by NixOS):
|
||||||
|
- `/boot/firmware` - FAT32 partition (label: `FIRMWARE`)
|
||||||
|
- Contains Raspberry Pi firmware, U-Boot bootloader, device trees
|
||||||
|
- `/` - tmpfs (in-memory, ephemeral root)
|
||||||
|
- 2GB RAM disk, wiped on every boot
|
||||||
|
- `/nix` - ext4 partition (label: `NIXOS_SD`)
|
||||||
|
- Nix store and persistent data
|
||||||
|
- Contains `/nix/persist` directory for impermanence
|
||||||
|
|
||||||
|
### Impermanence with tmpfs
|
||||||
|
|
||||||
|
Unlike btrfs-based hosts that use `/persist`, Pi hosts use `/nix/persist`:
|
||||||
|
- Root filesystem is tmpfs (no disk writes, auto-wiped)
|
||||||
|
- Single ext4 partition mounted at `/nix`
|
||||||
|
- Persistent data stored in `/nix/persist/` (directory, not separate mount)
|
||||||
|
- Better for SD card longevity (fewer writes)
|
||||||
|
|
||||||
|
**Persisted paths**:
|
||||||
|
- `/nix/persist/var/lib/nixos` - System state
|
||||||
|
- `/nix/persist/home/ppetru` - User home directory
|
||||||
|
- `/nix/persist/etc` - SSH host keys, machine-id
|
||||||
|
- Service-specific: `/nix/persist/var/lib/octoprint`, etc.
|
||||||
|
|
||||||
|
## Building the SD Image
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
- ARM64 emulation enabled on build machine:
|
||||||
|
```nix
|
||||||
|
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
|
||||||
|
```
|
||||||
|
(Already configured in `workstation-node.nix`)
|
||||||
|
|
||||||
|
### Build Command
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build SD image for stinky
|
||||||
|
nix build .#packages.aarch64-linux.stinky-sdImage
|
||||||
|
|
||||||
|
# Result location
|
||||||
|
ls -lh result/sd-image/
|
||||||
|
# nixos-sd-image-stinky-25.05-*.img.zst (compressed with zstd)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Build location**: Defined in `flake.nix`:
|
||||||
|
```nix
|
||||||
|
packages.aarch64-linux.stinky-sdImage =
|
||||||
|
self.nixosConfigurations.stinky.config.system.build.sdImage;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Flashing the SD Card
|
||||||
|
|
||||||
|
### Find SD Card Device
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Before inserting SD card
|
||||||
|
lsblk
|
||||||
|
|
||||||
|
# Insert SD card, then check again
|
||||||
|
lsblk
|
||||||
|
|
||||||
|
# Look for new device, typically:
|
||||||
|
# - /dev/sdX (USB SD card readers)
|
||||||
|
# - /dev/mmcblk0 (built-in SD card slots)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Warning**: Double-check the device! Wrong device = data loss.
|
||||||
|
|
||||||
|
### Flash Image
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Decompress and flash in one command
|
||||||
|
zstd -d -c result/sd-image/*.img.zst | sudo dd of=/dev/sdX bs=4M status=progress conv=fsync
|
||||||
|
|
||||||
|
# Or decompress first, then flash
|
||||||
|
unzstd result/sd-image/*.img.zst
|
||||||
|
sudo dd if=result/sd-image/*.img of=/dev/sdX bs=4M status=progress conv=fsync
|
||||||
|
```
|
||||||
|
|
||||||
|
### Eject SD Card
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo eject /dev/sdX
|
||||||
|
```
|
||||||
7
docs/TODO
Normal file
7
docs/TODO
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
* remote docker images used, can't come up if internet is down
|
||||||
|
* local docker images pulled from gitea, can't come up if gitea isn't up (yet)
|
||||||
|
* traefik-oidc-auth plugin downloaded from GitHub at startup (cached in /data/services/traefik/plugins-storage)
|
||||||
|
* renovate system of some kind
|
||||||
|
* vector (or other log ingestion) everywhere, consider moving it off docker if possible
|
||||||
|
* monitor backup-persist success/fail
|
||||||
|
|
||||||
916
flake.lock
generated
916
flake.lock
generated
File diff suppressed because it is too large
Load Diff
172
flake.nix
172
flake.nix
@@ -5,12 +5,16 @@
|
|||||||
deploy-rs.url = "github:serokell/deploy-rs";
|
deploy-rs.url = "github:serokell/deploy-rs";
|
||||||
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
|
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
|
||||||
impermanence.url = "github:nix-community/impermanence";
|
impermanence.url = "github:nix-community/impermanence";
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
|
||||||
nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixos-unstable";
|
nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||||
disko.url = "github:nix-community/disko";
|
disko.url = "github:nix-community/disko";
|
||||||
disko.inputs.nixpkgs.follows = "nixpkgs";
|
disko.inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
ethereum-nix = {
|
||||||
|
url = "github:nix-community/ethereum.nix";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||||
|
};
|
||||||
home-manager = {
|
home-manager = {
|
||||||
url = "github:nix-community/home-manager/release-24.11";
|
url = "github:nix-community/home-manager/release-25.05";
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
};
|
};
|
||||||
nix-index-database = {
|
nix-index-database = {
|
||||||
@@ -25,6 +29,16 @@
|
|||||||
url = "github:Mic92/sops-nix";
|
url = "github:Mic92/sops-nix";
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
};
|
};
|
||||||
|
browser-previews = {
|
||||||
|
url = "github:nix-community/browser-previews";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||||
|
};
|
||||||
|
omarchy-nix = {
|
||||||
|
url = "github:henrysipp/omarchy-nix";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
inputs.home-manager.follows = "home-manager";
|
||||||
|
};
|
||||||
|
nixos-hardware.url = "github:NixOS/nixos-hardware/master";
|
||||||
};
|
};
|
||||||
|
|
||||||
outputs =
|
outputs =
|
||||||
@@ -34,56 +48,88 @@
|
|||||||
nixpkgs-unstable,
|
nixpkgs-unstable,
|
||||||
deploy-rs,
|
deploy-rs,
|
||||||
disko,
|
disko,
|
||||||
|
ethereum-nix,
|
||||||
home-manager,
|
home-manager,
|
||||||
sops-nix,
|
|
||||||
impermanence,
|
impermanence,
|
||||||
|
sops-nix,
|
||||||
|
browser-previews,
|
||||||
|
omarchy-nix,
|
||||||
|
nixos-hardware,
|
||||||
...
|
...
|
||||||
}@inputs:
|
}@inputs:
|
||||||
let
|
let
|
||||||
inherit (self);
|
inherit (self);
|
||||||
|
|
||||||
overlay-unstable = final: prev: { unstable = nixpkgs-unstable.legacyPackages.${prev.system}; };
|
overlay-unstable = final: prev: {
|
||||||
|
unstable = import nixpkgs-unstable {
|
||||||
|
inherit (prev) system;
|
||||||
|
config.allowUnfree = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
mkNixos =
|
overlay-browser-previews = final: prev: {
|
||||||
system: modules:
|
browser-previews = browser-previews.packages.${prev.system};
|
||||||
|
};
|
||||||
|
|
||||||
|
mkHost =
|
||||||
|
system: profile: modules:
|
||||||
|
let
|
||||||
|
# Profile parameter is only used by home-manager for user environment
|
||||||
|
# NixOS system configuration is handled via explicit imports in host configs
|
||||||
|
in
|
||||||
nixpkgs.lib.nixosSystem {
|
nixpkgs.lib.nixosSystem {
|
||||||
system = system;
|
system = system;
|
||||||
modules = [
|
modules = [
|
||||||
(
|
(
|
||||||
{ config, pkgs, ... }:
|
{ config, pkgs, ... }:
|
||||||
{
|
{
|
||||||
nixpkgs.overlays = [ overlay-unstable ];
|
nixpkgs.overlays = [ overlay-unstable overlay-browser-previews ];
|
||||||
nixpkgs.config.allowUnfree = true;
|
nixpkgs.config.allowUnfree = true;
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
disko.nixosModules.disko
|
disko.nixosModules.disko
|
||||||
sops-nix.nixosModules.sops
|
sops-nix.nixosModules.sops
|
||||||
impermanence.nixosModules.impermanence
|
impermanence.nixosModules.impermanence
|
||||||
|
home-manager.nixosModules.home-manager
|
||||||
|
(
|
||||||
|
{ lib, ... }:
|
||||||
|
lib.mkMerge [
|
||||||
|
{
|
||||||
|
home-manager = {
|
||||||
|
useGlobalPkgs = true;
|
||||||
|
useUserPackages = true;
|
||||||
|
users.ppetru = {
|
||||||
|
imports = [
|
||||||
|
inputs.nix-index-database.homeModules.nix-index
|
||||||
|
inputs.nixvim.homeModules.nixvim
|
||||||
|
./home
|
||||||
|
] ++ lib.optionals (profile == "desktop") [
|
||||||
|
omarchy-nix.homeManagerModules.default
|
||||||
|
];
|
||||||
|
};
|
||||||
|
extraSpecialArgs = {
|
||||||
|
inherit profile;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
(lib.optionalAttrs (profile == "desktop") {
|
||||||
|
omarchy = {
|
||||||
|
full_name = "Petru Paler";
|
||||||
|
email_address = "petru@paler.net";
|
||||||
|
theme = "tokyo-night";
|
||||||
|
monitors = [ "DP-1,preferred,auto,1.5" ];
|
||||||
|
};
|
||||||
|
})
|
||||||
|
]
|
||||||
|
)
|
||||||
|
] ++ nixpkgs.lib.optionals (profile == "desktop") [
|
||||||
|
omarchy-nix.nixosModules.default
|
||||||
] ++ modules;
|
] ++ modules;
|
||||||
specialArgs = {
|
specialArgs = {
|
||||||
inherit inputs self;
|
inherit inputs self;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
mkHMNixos =
|
|
||||||
system: modules:
|
|
||||||
mkNixos system ([
|
|
||||||
home-manager.nixosModules.home-manager
|
|
||||||
{
|
|
||||||
home-manager = {
|
|
||||||
useGlobalPkgs = true;
|
|
||||||
useUserPackages = true;
|
|
||||||
users.ppetru = {
|
|
||||||
imports = [
|
|
||||||
(inputs.impermanence + "/home-manager.nix")
|
|
||||||
inputs.nix-index-database.hmModules.nix-index
|
|
||||||
inputs.nixvim.homeManagerModules.nixvim
|
|
||||||
./home
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}] ++ modules);
|
|
||||||
|
|
||||||
pkgsFor =
|
pkgsFor =
|
||||||
system:
|
system:
|
||||||
import nixpkgs {
|
import nixpkgs {
|
||||||
@@ -97,7 +143,7 @@
|
|||||||
inherit system;
|
inherit system;
|
||||||
overlays = [
|
overlays = [
|
||||||
overlay-unstable
|
overlay-unstable
|
||||||
deploy-rs.overlay
|
deploy-rs.overlays.default
|
||||||
(self: super: {
|
(self: super: {
|
||||||
deploy-rs = {
|
deploy-rs = {
|
||||||
inherit (pkgsFor system) deploy-rs;
|
inherit (pkgsFor system) deploy-rs;
|
||||||
@@ -109,12 +155,18 @@
|
|||||||
in
|
in
|
||||||
{
|
{
|
||||||
nixosConfigurations = {
|
nixosConfigurations = {
|
||||||
c1 = mkHMNixos "x86_64-linux" [ ./hosts/c1 ];
|
c1 = mkHost "x86_64-linux" "minimal" [ ./hosts/c1 ];
|
||||||
c2 = mkHMNixos "x86_64-linux" [ ./hosts/c2 ];
|
c2 = mkHost "x86_64-linux" "minimal" [ ./hosts/c2 ];
|
||||||
c3 = mkHMNixos "x86_64-linux" [ ./hosts/c3 ];
|
c3 = mkHost "x86_64-linux" "minimal" [ ./hosts/c3 ];
|
||||||
alo-cloud-1 = mkHMNixos "aarch64-linux" [ ./hosts/alo-cloud-1 ];
|
alo-cloud-1 = mkHost "aarch64-linux" "cloud" [ ./hosts/alo-cloud-1 ];
|
||||||
zippy = mkHMNixos "x86_64-linux" [ ./hosts/zippy ];
|
zippy = mkHost "x86_64-linux" "minimal" [ ./hosts/zippy ];
|
||||||
chilly = mkHMNixos "x86_64-linux" [ ./hosts/chilly ];
|
chilly = mkHost "x86_64-linux" "workstation" [ ./hosts/chilly ];
|
||||||
|
sparky = mkHost "x86_64-linux" "minimal" [ ./hosts/sparky ];
|
||||||
|
beefy = mkHost "x86_64-linux" "desktop" [ ./hosts/beefy ];
|
||||||
|
stinky = mkHost "aarch64-linux" "minimal" [
|
||||||
|
nixos-hardware.nixosModules.raspberry-pi-4
|
||||||
|
./hosts/stinky
|
||||||
|
];
|
||||||
};
|
};
|
||||||
|
|
||||||
deploy = {
|
deploy = {
|
||||||
@@ -167,9 +219,63 @@
|
|||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
sparky = {
|
||||||
|
hostname = "sparky";
|
||||||
|
profiles = {
|
||||||
|
system = {
|
||||||
|
user = "root";
|
||||||
|
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.sparky;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
beefy = {
|
||||||
|
hostname = "beefy";
|
||||||
|
profiles = {
|
||||||
|
system = {
|
||||||
|
user = "root";
|
||||||
|
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.beefy;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
stinky = {
|
||||||
|
hostname = "stinky";
|
||||||
|
profiles = {
|
||||||
|
system = {
|
||||||
|
user = "root";
|
||||||
|
path = (deployPkgsFor "aarch64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.stinky;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
# SD card image for stinky (Raspberry Pi 4)
|
||||||
|
packages.aarch64-linux.stinky-sdImage = self.nixosConfigurations.stinky.config.system.build.sdImage;
|
||||||
|
|
||||||
|
# Apps - utility scripts
|
||||||
|
apps.x86_64-linux.diff-configs = {
|
||||||
|
type = "app";
|
||||||
|
program = "${(pkgsFor "x86_64-linux").writeShellScriptBin "diff-configs" (builtins.readFile ./scripts/diff-configs.sh)}/bin/diff-configs";
|
||||||
|
};
|
||||||
|
|
||||||
|
apps.aarch64-linux.diff-configs = {
|
||||||
|
type = "app";
|
||||||
|
program = "${(pkgsFor "aarch64-linux").writeShellScriptBin "diff-configs" (builtins.readFile ./scripts/diff-configs.sh)}/bin/diff-configs";
|
||||||
|
};
|
||||||
|
|
||||||
|
# Development shells
|
||||||
|
devShells.x86_64-linux.default = (pkgsFor "x86_64-linux").mkShell {
|
||||||
|
packages = with (pkgsFor "x86_64-linux"); [
|
||||||
|
nvd
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
devShells.aarch64-linux.default = (pkgsFor "aarch64-linux").mkShell {
|
||||||
|
packages = with (pkgsFor "aarch64-linux"); [
|
||||||
|
nvd
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
|
checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
|
||||||
|
|
||||||
formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-rfc-style;
|
formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-rfc-style;
|
||||||
|
|||||||
@@ -1,7 +1,9 @@
|
|||||||
{ pkgs, ... }:
|
{ pkgs, profile ? "cli", ... }:
|
||||||
{
|
{
|
||||||
|
imports = [ ./programs/${profile}.nix ];
|
||||||
|
|
||||||
home = {
|
home = {
|
||||||
packages = (import ./packages.nix { inherit pkgs; }).packages;
|
packages = (import ./packages.nix { inherit pkgs profile; }).packages;
|
||||||
stateVersion = "24.05"; # TODO: unify this with the references in flake.nix:inputs
|
stateVersion = "24.05"; # TODO: unify this with the references in flake.nix:inputs
|
||||||
|
|
||||||
sessionVariables = {
|
sessionVariables = {
|
||||||
@@ -10,34 +12,27 @@
|
|||||||
MOSH_SERVER_NETWORK_TMOUT = 604800;
|
MOSH_SERVER_NETWORK_TMOUT = 604800;
|
||||||
NOMAD_ADDR = "http://nomad.service.consul:4646";
|
NOMAD_ADDR = "http://nomad.service.consul:4646";
|
||||||
LESS = "-F -i -M -+S -R -w -X -z-4";
|
LESS = "-F -i -M -+S -R -w -X -z-4";
|
||||||
SYSTEMD_LESS = "FiM+SRwXz-4";
|
SYSTEMD_LESS = "FiM+SRwX";
|
||||||
NIX_LD = "${pkgs.glibc}/lib/ld-linux-x86-64.so.2";
|
NIX_LD = "${pkgs.glibc}/lib/ld-linux-x86-64.so.2";
|
||||||
NIX_LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
|
NIX_LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
|
||||||
pkgs.stdenv.cc.cc
|
pkgs.stdenv.cc.cc
|
||||||
];
|
];
|
||||||
GEMINI_API_KEY = "AIzaSyBZkifYOFNKCjROLa_GZyzQbB2EbEYIby4";
|
GEMINI_API_KEY = "AIzaSyBZkifYOFNKCjROLa_GZyzQbB2EbEYIby4";
|
||||||
LLM_GEMINI_KEY = "AIzaSyBZkifYOFNKCjROLa_GZyzQbB2EbEYIby4";
|
LLM_GEMINI_KEY = "AIzaSyBZkifYOFNKCjROLa_GZyzQbB2EbEYIby4";
|
||||||
|
PLAYWRIGHT_BROWSERS_PATH = "${pkgs.unstable.playwright-driver.browsers}";
|
||||||
|
NIXOS_OZONE_WL = "1";
|
||||||
};
|
};
|
||||||
|
|
||||||
shellAliases = {
|
shellAliases = {
|
||||||
reload-home-manager-config = "home-manager switch --flake ${builtins.toString ./.}";
|
reload-home-manager-config = "home-manager switch --flake ${builtins.toString ./.}";
|
||||||
};
|
};
|
||||||
|
|
||||||
persistence."/persist/home/ppetru" = {
|
file.".ssh/rc".text = ''
|
||||||
directories = [
|
#!/bin/sh
|
||||||
".cache/nix"
|
if test "$SSH_AUTH_SOCK"; then
|
||||||
".cache/nix-index"
|
ln -sf "$SSH_AUTH_SOCK" "$HOME/.ssh/ssh_auth_sock"
|
||||||
".config/io.datasette.llm/"
|
fi
|
||||||
".config/sops/"
|
'';
|
||||||
".docker/"
|
file.".ssh/rc".executable = true;
|
||||||
".local/share/fish"
|
|
||||||
".ssh"
|
|
||||||
"projects"
|
|
||||||
];
|
|
||||||
files = [ ];
|
|
||||||
allowOther = true;
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
programs = import ./programs.nix { inherit pkgs; };
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,30 +1,7 @@
|
|||||||
{ pkgs }:
|
{ pkgs, profile ? "workstation" }:
|
||||||
let
|
let
|
||||||
corePkgs = with pkgs; [
|
profilePackages = import ./profiles/${profile}.nix { inherit pkgs; };
|
||||||
direnv
|
|
||||||
fzf
|
|
||||||
git
|
|
||||||
mosh
|
|
||||||
ripgrep
|
|
||||||
tmux
|
|
||||||
zsh
|
|
||||||
];
|
|
||||||
|
|
||||||
pythonEnv = pkgs.unstable.python3.withPackages (ps: [
|
|
||||||
ps.aider-chat
|
|
||||||
ps.google-generativeai
|
|
||||||
ps.ipython
|
|
||||||
ps.llm
|
|
||||||
ps.llm-gemini
|
|
||||||
]);
|
|
||||||
|
|
||||||
fishPkgs = with pkgs.fishPlugins; [
|
|
||||||
pure
|
|
||||||
# don't add failed commands to history
|
|
||||||
sponge
|
|
||||||
transient-fish
|
|
||||||
];
|
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
packages = corePkgs ++ [ pythonEnv ] ++ fishPkgs;
|
packages = profilePackages.packages;
|
||||||
}
|
}
|
||||||
|
|||||||
22
home/profiles/cloud.nix
Normal file
22
home/profiles/cloud.nix
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
{ pkgs }:
|
||||||
|
let
|
||||||
|
corePkgs = with pkgs; [
|
||||||
|
direnv
|
||||||
|
fzf
|
||||||
|
git
|
||||||
|
mosh
|
||||||
|
ripgrep
|
||||||
|
tmux
|
||||||
|
zsh
|
||||||
|
];
|
||||||
|
|
||||||
|
fishPkgs = with pkgs.fishPlugins; [
|
||||||
|
pure
|
||||||
|
# don't add failed commands to history
|
||||||
|
sponge
|
||||||
|
transient-fish
|
||||||
|
];
|
||||||
|
in
|
||||||
|
{
|
||||||
|
packages = corePkgs ++ fishPkgs;
|
||||||
|
}
|
||||||
13
home/profiles/desktop.nix
Normal file
13
home/profiles/desktop.nix
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
{ pkgs }:
|
||||||
|
let
|
||||||
|
workstationProfile = import ./workstation.nix { inherit pkgs; };
|
||||||
|
|
||||||
|
desktopPkgs = with pkgs; [
|
||||||
|
browser-previews.google-chrome
|
||||||
|
foot # Wayland-native terminal emulator
|
||||||
|
wofi # Application launcher for Wayland
|
||||||
|
];
|
||||||
|
in
|
||||||
|
{
|
||||||
|
packages = workstationProfile.packages ++ desktopPkgs;
|
||||||
|
}
|
||||||
5
home/profiles/minimal.nix
Normal file
5
home/profiles/minimal.nix
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{ pkgs }:
|
||||||
|
{
|
||||||
|
# Minimal profile: reuses server.nix for basic package list
|
||||||
|
packages = (import ./server.nix { inherit pkgs; }).packages;
|
||||||
|
}
|
||||||
22
home/profiles/server.nix
Normal file
22
home/profiles/server.nix
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
{ pkgs }:
|
||||||
|
let
|
||||||
|
corePkgs = with pkgs; [
|
||||||
|
direnv
|
||||||
|
fzf
|
||||||
|
git
|
||||||
|
mosh
|
||||||
|
ripgrep
|
||||||
|
tmux
|
||||||
|
zsh
|
||||||
|
];
|
||||||
|
|
||||||
|
fishPkgs = with pkgs.fishPlugins; [
|
||||||
|
pure
|
||||||
|
# don't add failed commands to history
|
||||||
|
# sponge
|
||||||
|
transient-fish
|
||||||
|
];
|
||||||
|
in
|
||||||
|
{
|
||||||
|
packages = corePkgs ++ fishPkgs;
|
||||||
|
}
|
||||||
20
home/profiles/workstation.nix
Normal file
20
home/profiles/workstation.nix
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
{ pkgs }:
|
||||||
|
let
|
||||||
|
serverProfile = import ./server.nix { inherit pkgs; };
|
||||||
|
|
||||||
|
cliPkgs = with pkgs; [
|
||||||
|
unstable.claude-code
|
||||||
|
unstable.codex
|
||||||
|
unstable.gemini-cli
|
||||||
|
];
|
||||||
|
|
||||||
|
pythonEnv = pkgs.unstable.python3.withPackages (ps: [
|
||||||
|
ps.google-generativeai
|
||||||
|
ps.ipython
|
||||||
|
ps.llm
|
||||||
|
ps.llm-gemini
|
||||||
|
]);
|
||||||
|
in
|
||||||
|
{
|
||||||
|
packages = serverProfile.packages ++ cliPkgs ++ [ pythonEnv ];
|
||||||
|
}
|
||||||
8
home/programs/cloud.nix
Normal file
8
home/programs/cloud.nix
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
imports = [ ./server.nix ];
|
||||||
|
|
||||||
|
# Cloud-specific home-manager programs
|
||||||
|
# Currently uses server profile's minimal CLI setup
|
||||||
|
# Add cloud-specific customizations here if needed in the future
|
||||||
|
}
|
||||||
27
home/programs/desktop.nix
Normal file
27
home/programs/desktop.nix
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
imports = [ ./workstation.nix ];
|
||||||
|
|
||||||
|
# Override ghostty to use unstable version (1.2.0+) for ssh-terminfo support
|
||||||
|
programs.ghostty.package = pkgs.unstable.ghostty;
|
||||||
|
|
||||||
|
wayland.windowManager.hyprland = {
|
||||||
|
enable = true;
|
||||||
|
settings = {
|
||||||
|
# Remap CapsLock to Super (Mod4)
|
||||||
|
"$mod" = "SUPER";
|
||||||
|
|
||||||
|
input = {
|
||||||
|
kb_options = "caps:super";
|
||||||
|
};
|
||||||
|
|
||||||
|
"$browser" = "google-chrome-stable --new-window --ozone-platform=wayland";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# Extend ghostty configuration from omarchy-nix
|
||||||
|
programs.ghostty.settings = {
|
||||||
|
# Automatically handle TERM compatibility for SSH (requires ghostty 1.2.0+)
|
||||||
|
shell-integration-features = "ssh-terminfo";
|
||||||
|
};
|
||||||
|
}
|
||||||
5
home/programs/minimal.nix
Normal file
5
home/programs/minimal.nix
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
# Minimal profile: reuses server.nix for basic CLI programs
|
||||||
|
imports = [ ./server.nix ];
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
{ pkgs, ... }:
|
{ pkgs, ... }:
|
||||||
{
|
{
|
||||||
dircolors = {
|
programs = {
|
||||||
|
dircolors = {
|
||||||
enable = true;
|
enable = true;
|
||||||
extraConfig = ''
|
extraConfig = ''
|
||||||
# Dark 256 color solarized theme for the color GNU ls utility.
|
# Dark 256 color solarized theme for the color GNU ls utility.
|
||||||
@@ -315,88 +316,94 @@
|
|||||||
.ogx 01;38;5;166
|
.ogx 01;38;5;166
|
||||||
'';
|
'';
|
||||||
settings = pkgs.lib.mkForce { };
|
settings = pkgs.lib.mkForce { };
|
||||||
};
|
|
||||||
|
|
||||||
direnv = {
|
|
||||||
enable = true;
|
|
||||||
nix-direnv.enable = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
fish = {
|
|
||||||
enable = true;
|
|
||||||
|
|
||||||
shellAbbrs = {
|
|
||||||
fix-ssh = "eval $(tmux show-env | grep ^SSH_AUTH_SOCK | sed 's/=/ /;s/^/set /')";
|
|
||||||
diff-persist = "sudo rsync -amvxx --dry-run --no-links --exclude '/tmp/*' --exclude '/root/*' / /persist/ | rg -v '^skipping|/$'";
|
|
||||||
};
|
};
|
||||||
|
|
||||||
shellInit = ''
|
direnv = {
|
||||||
set fish_greeting
|
enable = true;
|
||||||
|
nix-direnv.enable = true;
|
||||||
set pure_color_mute green
|
|
||||||
set pure_check_for_new_release false
|
|
||||||
set pure_enable_nixdevshell true
|
|
||||||
set pure_show_prefix_root_prompt true
|
|
||||||
set sponge_regex_patterns 'password|passwd'
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
fzf = {
|
|
||||||
enable = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
git = {
|
|
||||||
enable = true;
|
|
||||||
userEmail = "petru@paler.net";
|
|
||||||
userName = "Petru Paler";
|
|
||||||
};
|
|
||||||
|
|
||||||
home-manager = {
|
|
||||||
enable = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
less.enable = true;
|
|
||||||
|
|
||||||
lesspipe.enable = false;
|
|
||||||
|
|
||||||
nix-index-database.comma.enable = true;
|
|
||||||
|
|
||||||
nixvim = {
|
|
||||||
enable = true;
|
|
||||||
|
|
||||||
defaultEditor = true;
|
|
||||||
viAlias = true;
|
|
||||||
# makes lessopen complain sometimes
|
|
||||||
vimAlias = false;
|
|
||||||
|
|
||||||
opts = {
|
|
||||||
tabstop = 4;
|
|
||||||
softtabstop = 4;
|
|
||||||
shiftwidth = 4;
|
|
||||||
expandtab = true;
|
|
||||||
shiftround = true;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
plugins = {
|
fish = {
|
||||||
nix.enable = true;
|
enable = true;
|
||||||
|
|
||||||
|
shellAbbrs = {
|
||||||
|
diff-persist = "sudo rsync -amvxx --dry-run --no-links --exclude '/tmp/*' --exclude '/root/*' / /persist/ | rg -v '^skipping|/$'";
|
||||||
|
};
|
||||||
|
|
||||||
|
shellInit = ''
|
||||||
|
set fish_greeting
|
||||||
|
|
||||||
|
set pure_color_mute green
|
||||||
|
set pure_check_for_new_release false
|
||||||
|
set pure_enable_nixdevshell true
|
||||||
|
set pure_show_prefix_root_prompt true
|
||||||
|
set sponge_regex_patterns 'password|passwd'
|
||||||
|
'';
|
||||||
};
|
};
|
||||||
};
|
|
||||||
|
|
||||||
tmux = {
|
fzf = {
|
||||||
enable = true;
|
enable = true;
|
||||||
prefix = "C-t";
|
};
|
||||||
terminal = "screen-256color";
|
|
||||||
historyLimit = 20000;
|
|
||||||
keyMode = "vi";
|
|
||||||
extraConfig = ''
|
|
||||||
bind-key t send-prefix
|
|
||||||
bind-key C-t last-window
|
|
||||||
|
|
||||||
set -g status-left ""
|
git = {
|
||||||
set -g status-right ""
|
enable = true;
|
||||||
|
userEmail = "petru@paler.net";
|
||||||
|
userName = "Petru Paler";
|
||||||
|
};
|
||||||
|
|
||||||
setw -g automatic-rename on
|
home-manager = {
|
||||||
set -g set-titles on
|
enable = true;
|
||||||
'';
|
};
|
||||||
|
|
||||||
|
less.enable = true;
|
||||||
|
|
||||||
|
lesspipe.enable = false;
|
||||||
|
|
||||||
|
nix-index-database.comma.enable = true;
|
||||||
|
|
||||||
|
nixvim = {
|
||||||
|
enable = true;
|
||||||
|
|
||||||
|
defaultEditor = true;
|
||||||
|
viAlias = true;
|
||||||
|
# makes lessopen complain sometimes
|
||||||
|
vimAlias = false;
|
||||||
|
|
||||||
|
opts = {
|
||||||
|
tabstop = 4;
|
||||||
|
softtabstop = 4;
|
||||||
|
shiftwidth = 4;
|
||||||
|
expandtab = true;
|
||||||
|
shiftround = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
plugins = {
|
||||||
|
nix.enable = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
tmux = {
|
||||||
|
enable = true;
|
||||||
|
prefix = "C-t";
|
||||||
|
terminal = "screen-256color";
|
||||||
|
historyLimit = 20000;
|
||||||
|
keyMode = "vi";
|
||||||
|
extraConfig = ''
|
||||||
|
bind-key t send-prefix
|
||||||
|
bind-key C-t last-window
|
||||||
|
|
||||||
|
set -g status-left ""
|
||||||
|
set -g status-right ""
|
||||||
|
|
||||||
|
setw -g automatic-rename on
|
||||||
|
set -g set-titles on
|
||||||
|
|
||||||
|
# first, unset update-environment[SSH_AUTH_SOCK] (idx 3), to prevent
|
||||||
|
# the client overriding the global value
|
||||||
|
set-option -g -u update-environment[3]
|
||||||
|
# And set the global value to our static symlink'd path:
|
||||||
|
set-environment -g SSH_AUTH_SOCK $HOME/.ssh/ssh_auth_sock
|
||||||
|
'';
|
||||||
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
6
home/programs/workstation.nix
Normal file
6
home/programs/workstation.nix
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{ pkgs, ... }:
|
||||||
|
{
|
||||||
|
imports = [ ./server.nix ];
|
||||||
|
|
||||||
|
# Add workstation-specific programs here if needed in the future
|
||||||
|
}
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
{ pkgs, inputs, ... }:
|
{ pkgs, lib, inputs, ... }:
|
||||||
{
|
{
|
||||||
imports = [
|
imports = [
|
||||||
../../common/global
|
../../common/global
|
||||||
../../common/cloud-node.nix
|
../../common/minimal-node.nix
|
||||||
./hardware.nix
|
./hardware.nix
|
||||||
./reverse-proxy.nix
|
./reverse-proxy.nix
|
||||||
];
|
];
|
||||||
@@ -12,4 +12,27 @@
|
|||||||
|
|
||||||
networking.hostName = "alo-cloud-1";
|
networking.hostName = "alo-cloud-1";
|
||||||
services.tailscaleAutoconnect.authkey = "tskey-auth-kbdARC7CNTRL-pNQddmWV9q5C2sRV3WGep5ehjJ1qvcfD";
|
services.tailscaleAutoconnect.authkey = "tskey-auth-kbdARC7CNTRL-pNQddmWV9q5C2sRV3WGep5ehjJ1qvcfD";
|
||||||
|
|
||||||
|
services.tailscale = {
|
||||||
|
enable = true;
|
||||||
|
useRoutingFeatures = lib.mkForce "server"; # enables IPv4/IPv6 forwarding + loose rp_filter
|
||||||
|
extraUpFlags = [ "--advertise-exit-node" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.nat = {
|
||||||
|
enable = true;
|
||||||
|
externalInterface = "enp1s0";
|
||||||
|
internalInterfaces = [ "tailscale0" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.firewall = {
|
||||||
|
enable = lib.mkForce true;
|
||||||
|
allowedTCPPorts = [ 80 443 ]; # Public web traffic only
|
||||||
|
allowedUDPPorts = [ 41641 ]; # Tailscale
|
||||||
|
trustedInterfaces = [ "tailscale0" ]; # Full access via VPN
|
||||||
|
};
|
||||||
|
|
||||||
|
services.openssh = {
|
||||||
|
settings.PasswordAuthentication = false; # Keys only
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
{ pkgs, ... }:
|
{ pkgs, config, ... }:
|
||||||
{
|
{
|
||||||
environment.systemPackages = [ pkgs.traefik ];
|
environment.systemPackages = [ pkgs.traefik ];
|
||||||
environment.persistence."/persist".files = [ "/acme/acme.json" ];
|
environment.persistence.${config.custom.impermanence.persistPath}.files = [ "/acme/acme.json" ];
|
||||||
|
|
||||||
services.traefik = {
|
services.traefik = {
|
||||||
enable = true;
|
enable = true;
|
||||||
@@ -73,7 +73,7 @@
|
|||||||
wordpress-paler-net = {
|
wordpress-paler-net = {
|
||||||
entryPoints = "websecure";
|
entryPoints = "websecure";
|
||||||
rule = "Host(`wordpress.paler.net`)";
|
rule = "Host(`wordpress.paler.net`)";
|
||||||
service = "alo-cluster";
|
service = "varnish-cache";
|
||||||
};
|
};
|
||||||
|
|
||||||
ines-paler-net = {
|
ines-paler-net = {
|
||||||
@@ -117,6 +117,12 @@
|
|||||||
rule = "Host(`musictogethersilvercoast.pt`)";
|
rule = "Host(`musictogethersilvercoast.pt`)";
|
||||||
service = "varnish-cache";
|
service = "varnish-cache";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
alo-land = {
|
||||||
|
entryPoints = "websecure";
|
||||||
|
rule = "Host(`alo.land`)";
|
||||||
|
service = "varnish-cache";
|
||||||
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
@@ -135,6 +141,15 @@
|
|||||||
.host = "100.64.229.126";
|
.host = "100.64.229.126";
|
||||||
.port = "10080";
|
.port = "10080";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sub vcl_backend_response {
|
||||||
|
# default TTL if backend didn't specify one
|
||||||
|
if (beresp.ttl <= 0s) {
|
||||||
|
set beresp.ttl = 1h;
|
||||||
|
}
|
||||||
|
# serve stale content in case home link is down
|
||||||
|
set beresp.grace = 240h;
|
||||||
|
}
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
24
hosts/beefy/default.nix
Normal file
24
hosts/beefy/default.nix
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{ pkgs, inputs, ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../../common/encrypted-btrfs-layout.nix
|
||||||
|
../../common/global
|
||||||
|
../../common/desktop-node.nix # Hyprland + GUI environment
|
||||||
|
../../common/cluster-member.nix # Consul + storage clients
|
||||||
|
../../common/cluster-tools.nix # Nomad CLI (no service)
|
||||||
|
./hardware.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
diskLayout = {
|
||||||
|
mainDiskDevice = "/dev/disk/by-id/nvme-CT1000P3PSSD8_25164F81F31D";
|
||||||
|
#keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777650797-0:0";
|
||||||
|
keyDiskDevice = "/dev/sda";
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.hostName = "beefy";
|
||||||
|
networking.cluster.primaryInterface = "enp1s0";
|
||||||
|
services.tailscaleAutoconnect.authkey = "tskey-auth-k79UsDTw2v11CNTRL-oYqji35BE9c7CqM89Dzs9cBF14PmqYsi";
|
||||||
|
|
||||||
|
# Enable all SysRq functions for debugging hangs
|
||||||
|
boot.kernel.sysctl."kernel.sysrq" = 1;
|
||||||
|
}
|
||||||
19
hosts/beefy/hardware.nix
Normal file
19
hosts/beefy/hardware.nix
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
modulesPath,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];
|
||||||
|
|
||||||
|
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "usbhid" "usb_storage" "sd_mod" ];
|
||||||
|
boot.initrd.kernelModules = [ ];
|
||||||
|
boot.kernelModules = [ "kvm-amd" ];
|
||||||
|
boot.extraModulePackages = [ ];
|
||||||
|
|
||||||
|
nixpkgs.hostPlatform = "x86_64-linux";
|
||||||
|
hardware.cpu.amd.updateMicrocode = true; # Uncomment for AMD
|
||||||
|
}
|
||||||
BIN
hosts/beefy/key.bin
Normal file
BIN
hosts/beefy/key.bin
Normal file
Binary file not shown.
@@ -3,7 +3,14 @@
|
|||||||
imports = [
|
imports = [
|
||||||
../../common/encrypted-btrfs-layout.nix
|
../../common/encrypted-btrfs-layout.nix
|
||||||
../../common/global
|
../../common/global
|
||||||
../../common/compute-node.nix
|
../../common/cluster-member.nix # Consul + storage clients
|
||||||
|
../../common/nomad-worker.nix # Nomad client (runs jobs)
|
||||||
|
../../common/nomad-server.nix # Consul + Nomad server mode
|
||||||
|
../../common/nfs-services-standby.nix # NFS standby for /data/services
|
||||||
|
# To promote to NFS server (during failover):
|
||||||
|
# 1. Follow procedure in docs/NFS_FAILOVER.md
|
||||||
|
# 2. Replace above line with: ../../common/nfs-services-server.nix
|
||||||
|
# 3. Add nfsServicesServer.standbys = [ "c2" ]; (or leave empty)
|
||||||
./hardware.nix
|
./hardware.nix
|
||||||
];
|
];
|
||||||
|
|
||||||
@@ -15,4 +22,9 @@
|
|||||||
|
|
||||||
networking.hostName = "c1";
|
networking.hostName = "c1";
|
||||||
services.tailscaleAutoconnect.authkey = "tskey-auth-k2nQ771YHM11CNTRL-YVpoumL2mgR6nLPG51vNhRpEKMDN7gLAi";
|
services.tailscaleAutoconnect.authkey = "tskey-auth-k2nQ771YHM11CNTRL-YVpoumL2mgR6nLPG51vNhRpEKMDN7gLAi";
|
||||||
|
|
||||||
|
nfsServicesStandby.replicationKeys = [
|
||||||
|
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHyTKsMCbwCIlMcC/aopgz5Yfx/Q9QdlWC9jzMLgYFAV root@zippy-replication"
|
||||||
|
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO5s73FSUiysHijWRGYCJY8lCtZkX1DGKAqp2671REDq root@sparky-replication"
|
||||||
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,16 +3,18 @@
|
|||||||
imports = [
|
imports = [
|
||||||
../../common/encrypted-btrfs-layout.nix
|
../../common/encrypted-btrfs-layout.nix
|
||||||
../../common/global
|
../../common/global
|
||||||
../../common/compute-node.nix
|
../../common/cluster-member.nix # Consul + storage clients
|
||||||
|
../../common/nomad-worker.nix # Nomad client (runs jobs)
|
||||||
|
../../common/nomad-server.nix # Consul + Nomad server mode
|
||||||
./hardware.nix
|
./hardware.nix
|
||||||
];
|
];
|
||||||
|
|
||||||
diskLayout = {
|
diskLayout = {
|
||||||
mainDiskDevice = "/dev/disk/by-id/nvme-SAMSUNG_MZVLB256HAHQ-000H1_S425NA1M132963";
|
mainDiskDevice = "/dev/disk/by-id/nvme-KINGSTON_SNV3S1000G_50026B73841C1892";
|
||||||
#keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777650675-0:0";
|
#keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777650675-0:0";
|
||||||
keyDiskDevice = "/dev/sda";
|
keyDiskDevice = "/dev/sda";
|
||||||
};
|
};
|
||||||
|
|
||||||
networking.hostName = "c2";
|
networking.hostName = "c2";
|
||||||
services.tailscaleAutoconnect.authkey = "tskey-auth-kbYnZK2CNTRL-SpUVCuzS6P3ApJiDaB6RM3M4b8M9TXgS";
|
services.tailscaleAutoconnect.authkey = "tskey-auth-kQ11fTmrzd11CNTRL-N4c2L3SAzUbvcAVhqCFWUbAEasJNTknd";
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,10 @@
|
|||||||
imports = [
|
imports = [
|
||||||
../../common/encrypted-btrfs-layout.nix
|
../../common/encrypted-btrfs-layout.nix
|
||||||
../../common/global
|
../../common/global
|
||||||
../../common/compute-node.nix
|
../../common/cluster-member.nix # Consul + storage clients
|
||||||
|
../../common/nomad-worker.nix # Nomad client (runs jobs)
|
||||||
|
../../common/nomad-server.nix # Consul + Nomad server mode
|
||||||
|
../../common/binary-cache-server.nix
|
||||||
./hardware.nix
|
./hardware.nix
|
||||||
];
|
];
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,9 @@
|
|||||||
imports = [
|
imports = [
|
||||||
../../common/encrypted-btrfs-layout.nix
|
../../common/encrypted-btrfs-layout.nix
|
||||||
../../common/global
|
../../common/global
|
||||||
../../common/base-node.nix
|
../../common/workstation-node.nix # Dev tools (deploy-rs, docker, nix-ld)
|
||||||
|
../../common/cluster-member.nix # Consul + storage clients
|
||||||
|
../../common/cluster-tools.nix # Nomad CLI (no service)
|
||||||
./hardware.nix
|
./hardware.nix
|
||||||
];
|
];
|
||||||
|
|
||||||
@@ -19,8 +21,8 @@
|
|||||||
};
|
};
|
||||||
|
|
||||||
networking.hostName = "chilly";
|
networking.hostName = "chilly";
|
||||||
|
networking.cluster.primaryInterface = "br0";
|
||||||
services.tailscaleAutoconnect.authkey = "tskey-auth-kRXS9oPyPm11CNTRL-BE6YnbP9J6ZZuV9dHkX17ZMnm1JGdu93";
|
services.tailscaleAutoconnect.authkey = "tskey-auth-kRXS9oPyPm11CNTRL-BE6YnbP9J6ZZuV9dHkX17ZMnm1JGdu93";
|
||||||
services.consul.interface.advertise = lib.mkForce "br0";
|
|
||||||
|
|
||||||
networking.useNetworkd = true;
|
networking.useNetworkd = true;
|
||||||
systemd.network.enable = true;
|
systemd.network.enable = true;
|
||||||
|
|||||||
26
hosts/sparky/default.nix
Normal file
26
hosts/sparky/default.nix
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
{ pkgs, inputs, ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../../common/encrypted-btrfs-layout.nix
|
||||||
|
../../common/global
|
||||||
|
../../common/cluster-member.nix
|
||||||
|
../../common/nomad-worker.nix
|
||||||
|
../../common/nfs-services-server.nix
|
||||||
|
# To move NFS server role to another host:
|
||||||
|
# 1. Follow procedure in docs/NFS_FAILOVER.md
|
||||||
|
# 2. Replace above line with: ../../common/nfs-services-standby.nix
|
||||||
|
# 3. Add nfsServicesStandby.replicationKeys with the new server's public key
|
||||||
|
./hardware.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
diskLayout = {
|
||||||
|
mainDiskDevice = "/dev/disk/by-id/nvme-KIOXIA-EXCERIA_with_Heatsink_SSD_84GF7016FA4S";
|
||||||
|
#keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777660468-0:0";
|
||||||
|
keyDiskDevice = "/dev/sda";
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.hostName = "sparky";
|
||||||
|
services.tailscaleAutoconnect.authkey = "tskey-auth-k6VC79UrzN11CNTRL-rvPmd4viyrQ261ifCrfTrQve7c2FesxrG";
|
||||||
|
|
||||||
|
nfsServicesServer.standbys = [ "c1" ];
|
||||||
|
}
|
||||||
21
hosts/sparky/hardware.nix
Normal file
21
hosts/sparky/hardware.nix
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
modulesPath,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];
|
||||||
|
|
||||||
|
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "usb_storage" "usbhid" "sd_mod" "rtsx_pci_sdmmc" ];
|
||||||
|
boot.initrd.kernelModules = [ ];
|
||||||
|
boot.kernelModules = [
|
||||||
|
"kvm-intel"
|
||||||
|
];
|
||||||
|
boot.extraModulePackages = [ ];
|
||||||
|
|
||||||
|
nixpkgs.hostPlatform = "x86_64-linux";
|
||||||
|
hardware.cpu.intel.updateMicrocode = true;
|
||||||
|
}
|
||||||
BIN
hosts/sparky/key.bin
Normal file
BIN
hosts/sparky/key.bin
Normal file
Binary file not shown.
61
hosts/stinky/default.nix
Normal file
61
hosts/stinky/default.nix
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
{
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
config,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
../../common/global
|
||||||
|
../../common/impermanence-common.nix # Impermanence with custom root config (see hardware.nix)
|
||||||
|
../../common/resource-limits.nix
|
||||||
|
../../common/sshd.nix
|
||||||
|
../../common/user-ppetru.nix
|
||||||
|
../../common/wifi.nix
|
||||||
|
# Note: No systemd-boot.nix - Raspberry Pi uses generic-extlinux-compatible (from sd-image module)
|
||||||
|
./hardware.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
hardware = {
|
||||||
|
raspberry-pi."4".apply-overlays-dtmerge.enable = true;
|
||||||
|
deviceTree = {
|
||||||
|
enable = true;
|
||||||
|
filter = "*rpi-4-*.dtb";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.hostName = "stinky";
|
||||||
|
|
||||||
|
# Configure impermanence for tmpfs root (filesystem config in hardware.nix)
|
||||||
|
custom.impermanence.persistPath = "/nix/persist";
|
||||||
|
|
||||||
|
# Tailscale configuration
|
||||||
|
services.tailscaleAutoconnect.authkey = "tskey-auth-kZC8HX3wSw11CNTRL-7QvqxAphyzM7QeMUTKXv2Ng2RK4XCmg9A";
|
||||||
|
|
||||||
|
# OctoPrint for 3D printer
|
||||||
|
services.octoprint = {
|
||||||
|
enable = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
# Persist OctoPrint data
|
||||||
|
environment.persistence.${config.custom.impermanence.persistPath}.directories = [
|
||||||
|
"/var/lib/octoprint"
|
||||||
|
];
|
||||||
|
|
||||||
|
# Pi HQ Camera support
|
||||||
|
boot.kernelModules = [ "bcm2835-v4l2" ];
|
||||||
|
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
libcamera
|
||||||
|
libraspberrypi
|
||||||
|
raspberrypi-eeprom
|
||||||
|
];
|
||||||
|
|
||||||
|
# Firewall: Allow access to OctoPrint
|
||||||
|
networking.firewall.allowedTCPPorts = [
|
||||||
|
5000 # OctoPrint
|
||||||
|
];
|
||||||
|
|
||||||
|
# Override global default (stinky is a new system with 25.05)
|
||||||
|
system.stateVersion = lib.mkForce "25.05";
|
||||||
|
}
|
||||||
73
hosts/stinky/hardware.nix
Normal file
73
hosts/stinky/hardware.nix
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
modulesPath,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
(modulesPath + "/installer/sd-card/sd-image-aarch64.nix")
|
||||||
|
];
|
||||||
|
|
||||||
|
# Raspberry Pi 4 platform
|
||||||
|
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
|
||||||
|
|
||||||
|
# Disable ZFS (not needed, and broken with latest kernel)
|
||||||
|
boot.supportedFilesystems.zfs = lib.mkForce false;
|
||||||
|
|
||||||
|
# Boot configuration - provided by sd-image-aarch64.nix
|
||||||
|
# (grub disabled, generic-extlinux-compatible enabled, U-Boot setup)
|
||||||
|
|
||||||
|
# /boot/firmware is automatically configured by sd-image module
|
||||||
|
# Device: /dev/disk/by-label/FIRMWARE (vfat)
|
||||||
|
|
||||||
|
# tmpfs root with impermanence
|
||||||
|
# Override sd-image module's ext4 root definition with mkForce
|
||||||
|
fileSystems."/" = lib.mkForce {
|
||||||
|
device = "none";
|
||||||
|
fsType = "tmpfs";
|
||||||
|
options = [
|
||||||
|
"defaults"
|
||||||
|
"size=2G"
|
||||||
|
"mode=755"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
# The SD partition contains /nix/store and /nix/persist at its root
|
||||||
|
# Mount it at a hidden location, then bind mount its /nix to /nix
|
||||||
|
fileSystems."/mnt/nixos-sd" = {
|
||||||
|
device = "/dev/disk/by-label/NIXOS_SD";
|
||||||
|
fsType = "ext4";
|
||||||
|
options = [ "noatime" ];
|
||||||
|
neededForBoot = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
# Bind mount /nix from the SD partition
|
||||||
|
fileSystems."/nix" = {
|
||||||
|
device = "/mnt/nixos-sd/nix";
|
||||||
|
fsType = "none";
|
||||||
|
options = [ "bind" ];
|
||||||
|
neededForBoot = true;
|
||||||
|
depends = [ "/mnt/nixos-sd" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
# No swap on SD card (wear concern)
|
||||||
|
swapDevices = [ ];
|
||||||
|
|
||||||
|
# SD image build configuration
|
||||||
|
sdImage = {
|
||||||
|
compressImage = true;
|
||||||
|
|
||||||
|
# Populate root with directories
|
||||||
|
populateRootCommands = ''
|
||||||
|
mkdir -p ./files/boot
|
||||||
|
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
|
||||||
|
|
||||||
|
# Create /nix/persist directory structure for impermanence
|
||||||
|
mkdir -p ./files/nix/persist/var/lib/nixos
|
||||||
|
mkdir -p ./files/nix/persist/home/ppetru
|
||||||
|
mkdir -p ./files/nix/persist/etc
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -3,8 +3,8 @@
|
|||||||
imports = [
|
imports = [
|
||||||
../../common/encrypted-btrfs-layout.nix
|
../../common/encrypted-btrfs-layout.nix
|
||||||
../../common/global
|
../../common/global
|
||||||
../../common/compute-node.nix
|
../../common/cluster-member.nix # Consul + storage clients
|
||||||
../../common/dev-node.nix
|
../../common/nomad-worker.nix # Nomad client (runs jobs)
|
||||||
./hardware.nix
|
./hardware.nix
|
||||||
];
|
];
|
||||||
|
|
||||||
|
|||||||
374
scripts/diff-configs.sh
Executable file
374
scripts/diff-configs.sh
Executable file
@@ -0,0 +1,374 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Compare NixOS configurations between current state and HEAD
|
||||||
|
# Shows what would change if you committed the current changes
|
||||||
|
#
|
||||||
|
# Requirements: nvd must be in PATH
|
||||||
|
# Run inside `nix develop` or with direnv enabled
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
YELLOW='\033[0;33m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Normalize nix store paths by replacing 32-char hashes with placeholder
|
||||||
|
normalize_nix_paths() {
|
||||||
|
sed -E 's|/nix/store/[a-z0-9]{32}-|/nix/store/HASH-|g'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Filter diff output to remove hunks where only nix store hashes differ
|
||||||
|
# Returns: filtered diff (empty if only hash changes), exit code 0 if real changes found
|
||||||
|
filter_hash_only_diffs() {
|
||||||
|
local diff_output="$1"
|
||||||
|
local current_hunk=""
|
||||||
|
local output=""
|
||||||
|
local has_real_changes=false
|
||||||
|
|
||||||
|
# Process line by line
|
||||||
|
while IFS= read -r line || [ -n "$line" ]; do
|
||||||
|
if [[ "$line" =~ ^@@ ]]; then
|
||||||
|
# New hunk starts - process previous one if it exists
|
||||||
|
if [ -n "$current_hunk" ]; then
|
||||||
|
if hunk_has_real_changes "$current_hunk"; then
|
||||||
|
output+="$current_hunk"$'\n'
|
||||||
|
has_real_changes=true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
# Start new hunk
|
||||||
|
current_hunk="$line"$'\n'
|
||||||
|
else
|
||||||
|
# Add line to current hunk
|
||||||
|
current_hunk+="$line"$'\n'
|
||||||
|
fi
|
||||||
|
done <<< "$diff_output"
|
||||||
|
|
||||||
|
# Process last hunk
|
||||||
|
if [ -n "$current_hunk" ]; then
|
||||||
|
if hunk_has_real_changes "$current_hunk"; then
|
||||||
|
output+="$current_hunk"
|
||||||
|
has_real_changes=true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Remove trailing newline
|
||||||
|
output="${output%$'\n'}"
|
||||||
|
|
||||||
|
if [ "$has_real_changes" = true ]; then
|
||||||
|
echo "$output"
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if a diff hunk has real changes (not just hash changes)
|
||||||
|
hunk_has_real_changes() {
|
||||||
|
local hunk="$1"
|
||||||
|
|
||||||
|
# Use temp file to avoid bash here-string issues
|
||||||
|
local temp_hunk=$(mktemp)
|
||||||
|
printf '%s' "$hunk" > "$temp_hunk"
|
||||||
|
|
||||||
|
local minus_lines=()
|
||||||
|
local plus_lines=()
|
||||||
|
|
||||||
|
# Extract - and + lines (skip @@ and context lines)
|
||||||
|
while IFS= read -r line || [ -n "$line" ]; do
|
||||||
|
if [[ "$line" =~ ^- && ! "$line" =~ ^--- ]]; then
|
||||||
|
minus_lines+=("${line:1}") # Remove the - prefix
|
||||||
|
elif [[ "$line" =~ ^\+ && ! "$line" =~ ^\+\+\+ ]]; then
|
||||||
|
plus_lines+=("${line:1}") # Remove the + prefix
|
||||||
|
fi
|
||||||
|
done < "$temp_hunk"
|
||||||
|
|
||||||
|
rm -f "$temp_hunk"
|
||||||
|
|
||||||
|
# If counts don't match, there are structural changes
|
||||||
|
if [ ${#minus_lines[@]} -ne ${#plus_lines[@]} ]; then
|
||||||
|
return 0 # Has real changes
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If no changes at all, skip
|
||||||
|
if [ ${#minus_lines[@]} -eq 0 ]; then
|
||||||
|
return 1 # No real changes
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Compare each pair of lines after normalization
|
||||||
|
for i in "${!minus_lines[@]}"; do
|
||||||
|
local minus_norm=$(echo "${minus_lines[$i]}" | normalize_nix_paths)
|
||||||
|
local plus_norm=$(echo "${plus_lines[$i]}" | normalize_nix_paths)
|
||||||
|
|
||||||
|
if [ "$minus_norm" != "$plus_norm" ]; then
|
||||||
|
return 0 # Has real changes
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# All changes are hash-only
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check for nvd
|
||||||
|
if ! command -v nvd &> /dev/null; then
|
||||||
|
echo "Error: nvd not found in PATH"
|
||||||
|
echo "Run this script inside 'nix develop' or enable direnv"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Parse flags
|
||||||
|
verbose=false
|
||||||
|
deep=false
|
||||||
|
hosts_args=()
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
-h|--help)
|
||||||
|
echo "Usage: $0 [-v|--verbose] [-d|--deep] [HOST...]"
|
||||||
|
echo "Compare NixOS configurations between working tree and HEAD"
|
||||||
|
echo ""
|
||||||
|
echo "Options:"
|
||||||
|
echo " -v, --verbose Show detailed list of added/removed store paths"
|
||||||
|
echo " -d, --deep Show content diffs of changed files (implies -v)"
|
||||||
|
echo ""
|
||||||
|
echo "Arguments:"
|
||||||
|
echo " HOST One or more hostnames to compare (default: all)"
|
||||||
|
echo ""
|
||||||
|
echo "Examples:"
|
||||||
|
echo " $0 # Compare all hosts (summary)"
|
||||||
|
echo " $0 -v c1 # Compare c1 with path list"
|
||||||
|
echo " $0 --deep c1 # Compare c1 with content diffs"
|
||||||
|
echo " $0 c1 c2 c3 # Compare only c1, c2, c3"
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
-v|--verbose)
|
||||||
|
verbose=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-d|--deep)
|
||||||
|
deep=true
|
||||||
|
verbose=true # deep implies verbose
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
hosts_args+=("$1")
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Restore positional parameters
|
||||||
|
set -- "${hosts_args[@]}"
|
||||||
|
|
||||||
|
# Check if we're in a git repo
|
||||||
|
if ! git rev-parse --git-dir > /dev/null 2>&1; then
|
||||||
|
echo "Error: Not in a git repository"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if there are any changes
|
||||||
|
if git diff --quiet && git diff --cached --quiet; then
|
||||||
|
echo "No changes detected between working tree and HEAD"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Comparing configurations: current working tree vs HEAD"
|
||||||
|
echo "======================================================="
|
||||||
|
echo
|
||||||
|
|
||||||
|
# Get list of hosts to compare
|
||||||
|
if [ $# -gt 0 ]; then
|
||||||
|
# Use hosts provided as arguments
|
||||||
|
hosts="$@"
|
||||||
|
echo -e "${YELLOW}Comparing selected hosts: $hosts${NC}"
|
||||||
|
else
|
||||||
|
# Get all hosts from flake
|
||||||
|
echo "Discovering all hosts from flake..."
|
||||||
|
hosts=$(nix eval --raw .#deploy.nodes --apply 'nodes: builtins.concatStringsSep "\n" (builtins.attrNames nodes)' 2>/dev/null)
|
||||||
|
|
||||||
|
if [ -z "$hosts" ]; then
|
||||||
|
echo "Error: No hosts found in flake"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo
|
||||||
|
|
||||||
|
# Create temp worktree at HEAD
|
||||||
|
worktree=$(mktemp -d)
|
||||||
|
trap "git worktree remove --force '$worktree' &>/dev/null || true; rm -rf '$worktree'" EXIT
|
||||||
|
|
||||||
|
echo "Creating temporary worktree at HEAD..."
|
||||||
|
git worktree add --quiet --detach "$worktree" HEAD
|
||||||
|
|
||||||
|
echo "Building and comparing configurations..."
|
||||||
|
echo
|
||||||
|
|
||||||
|
any_changes=false
|
||||||
|
|
||||||
|
for host in $hosts; do
|
||||||
|
echo -e "${BLUE}━━━ $host ━━━${NC}"
|
||||||
|
|
||||||
|
# Build current (with uncommitted changes)
|
||||||
|
echo -n " Building current... "
|
||||||
|
if ! current=$(nix build --no-link --print-out-paths \
|
||||||
|
".#nixosConfigurations.$host.config.system.build.toplevel" 2>/dev/null); then
|
||||||
|
echo -e "${RED}FAILED${NC}"
|
||||||
|
# Re-run to show error
|
||||||
|
nix build --no-link ".#nixosConfigurations.$host.config.system.build.toplevel" 2>&1 | head -20 | sed 's/^/ /'
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
echo "done"
|
||||||
|
|
||||||
|
# Build HEAD
|
||||||
|
echo -n " Building HEAD... "
|
||||||
|
if ! head=$(nix build --no-link --print-out-paths \
|
||||||
|
"$worktree#nixosConfigurations.$host.config.system.build.toplevel" 2>/dev/null); then
|
||||||
|
echo -e "${RED}FAILED${NC}"
|
||||||
|
# Re-run to show error
|
||||||
|
nix build --no-link "$worktree#nixosConfigurations.$host.config.system.build.toplevel" 2>&1 | head -20 | sed 's/^/ /'
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
echo "done"
|
||||||
|
|
||||||
|
# Compare
|
||||||
|
if [ "$head" = "$current" ]; then
|
||||||
|
echo -e " ${GREEN}✓ No changes${NC}"
|
||||||
|
else
|
||||||
|
any_changes=true
|
||||||
|
echo -e " ${RED}⚠ Configuration changed${NC}"
|
||||||
|
echo
|
||||||
|
|
||||||
|
# Show nvd summary
|
||||||
|
if ! nvd diff "$head" "$current" 2>&1; then
|
||||||
|
echo -e " ${RED}(nvd diff failed - see error above)${NC}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Show detailed closure diff if verbose
|
||||||
|
if [ "$verbose" = true ]; then
|
||||||
|
echo
|
||||||
|
echo -e " ${YELLOW}Changed store paths:${NC}"
|
||||||
|
|
||||||
|
# Get paths unique to HEAD and current
|
||||||
|
head_only=$(comm -23 <(nix-store -q --requisites "$head" 2>/dev/null | sort) \
|
||||||
|
<(nix-store -q --requisites "$current" 2>/dev/null | sort))
|
||||||
|
current_only=$(comm -13 <(nix-store -q --requisites "$head" 2>/dev/null | sort) \
|
||||||
|
<(nix-store -q --requisites "$current" 2>/dev/null | sort))
|
||||||
|
|
||||||
|
# Count changes
|
||||||
|
removed_count=$(echo "$head_only" | wc -l)
|
||||||
|
added_count=$(echo "$current_only" | wc -l)
|
||||||
|
|
||||||
|
echo -e " ${RED}Removed ($removed_count paths):${NC}"
|
||||||
|
echo "$head_only" | head -10 | sed 's|^/nix/store/[^-]*-| - |'
|
||||||
|
if [ "$removed_count" -gt 10 ]; then
|
||||||
|
echo " ... and $((removed_count - 10)) more"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo
|
||||||
|
echo -e " ${GREEN}Added ($added_count paths):${NC}"
|
||||||
|
echo "$current_only" | head -10 | sed 's|^/nix/store/[^-]*-| - |'
|
||||||
|
if [ "$added_count" -gt 10 ]; then
|
||||||
|
echo " ... and $((added_count - 10)) more"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Show content diffs if deep mode
|
||||||
|
if [ "$deep" = true ]; then
|
||||||
|
echo
|
||||||
|
echo -e " ${YELLOW}Content diffs of changed files:${NC}"
|
||||||
|
|
||||||
|
# Extract basenames for matching
|
||||||
|
declare -A head_paths
|
||||||
|
while IFS= read -r path; do
|
||||||
|
[ -z "$path" ] && continue
|
||||||
|
basename="${path#/nix/store/[a-z0-9]*-}"
|
||||||
|
head_paths["$basename"]="$path"
|
||||||
|
done <<< "$head_only"
|
||||||
|
|
||||||
|
# Find matching pairs and diff them
|
||||||
|
matched=false
|
||||||
|
while IFS= read -r path; do
|
||||||
|
[ -z "$path" ] && continue
|
||||||
|
basename="${path#/nix/store/[a-z0-9]*-}"
|
||||||
|
|
||||||
|
# Check if we have a matching path in head
|
||||||
|
if [ -n "${head_paths[$basename]:-}" ]; then
|
||||||
|
old_path="${head_paths[$basename]}"
|
||||||
|
new_path="$path"
|
||||||
|
matched=true
|
||||||
|
|
||||||
|
echo
|
||||||
|
echo -e " ${BLUE}▸ $basename${NC}"
|
||||||
|
|
||||||
|
# If it's a directory, diff all files within it
|
||||||
|
if [ -d "$old_path" ] && [ -d "$new_path" ]; then
|
||||||
|
# Count files to avoid processing huge directories
|
||||||
|
file_count=$(find "$new_path" -maxdepth 3 -type f 2>/dev/null | wc -l)
|
||||||
|
|
||||||
|
# Skip very large directories (e.g., system-path with 900+ files)
|
||||||
|
if [ "$file_count" -gt 100 ]; then
|
||||||
|
echo " (skipping directory with $file_count files - too large)"
|
||||||
|
else
|
||||||
|
# Diff all files in the directory
|
||||||
|
for file in $(find "$new_path" -maxdepth 3 -type f 2>/dev/null); do
|
||||||
|
[ -z "$file" ] && continue
|
||||||
|
relpath="${file#$new_path/}"
|
||||||
|
old_file="$old_path/$relpath"
|
||||||
|
|
||||||
|
if [ -f "$old_file" ] && [ -f "$file" ]; then
|
||||||
|
# Check if file is text
|
||||||
|
if file "$file" | grep -q "text"; then
|
||||||
|
# Get diff output
|
||||||
|
diff_output=$(diff -u "$old_file" "$file" 2>/dev/null | head -50 | tail -n +3 || true)
|
||||||
|
|
||||||
|
# Filter hash-only changes
|
||||||
|
if [ -n "$diff_output" ]; then
|
||||||
|
filtered_diff=$(filter_hash_only_diffs "$diff_output" || true)
|
||||||
|
|
||||||
|
if [ -n "$filtered_diff" ]; then
|
||||||
|
echo -e " ${YELLOW}$relpath:${NC}"
|
||||||
|
echo "$filtered_diff" | sed 's/^/ /'
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
# If it's a file, diff it directly
|
||||||
|
elif [ -f "$old_path" ] && [ -f "$new_path" ]; then
|
||||||
|
if file "$new_path" | grep -q "text"; then
|
||||||
|
# Get diff output
|
||||||
|
diff_output=$(diff -u "$old_path" "$new_path" 2>/dev/null | head -50 | tail -n +3 || true)
|
||||||
|
|
||||||
|
# Filter hash-only changes
|
||||||
|
if [ -n "$diff_output" ]; then
|
||||||
|
filtered_diff=$(filter_hash_only_diffs "$diff_output" || true)
|
||||||
|
|
||||||
|
if [ -n "$filtered_diff" ]; then
|
||||||
|
echo "$filtered_diff" | sed 's/^/ /'
|
||||||
|
else
|
||||||
|
echo " (only hash changes)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo " (binary file)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done <<< "$current_only"
|
||||||
|
|
||||||
|
if [ "$matched" = false ]; then
|
||||||
|
echo " (no matching paths found to compare)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
echo
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "$any_changes" = false ]; then
|
||||||
|
echo -e "${GREEN}✓ All configurations unchanged${NC}"
|
||||||
|
else
|
||||||
|
echo -e "${RED}⚠ Some configurations changed - review carefully before committing${NC}"
|
||||||
|
fi
|
||||||
25
secrets/beefy.yaml
Normal file
25
secrets/beefy.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
kopia: ENC[AES256_GCM,data:/6jqArNgeBoGnEdJ1eshrsG8RJs=,iv:2nNdrKczus70QDdvO/MC2wJubGnAf3M8PtzSe1aoBF4=,tag:aOoktsqhQLXr0YkjYZq4OQ==,type:str]
|
||||||
|
sops:
|
||||||
|
age:
|
||||||
|
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBODczb2FsMis0cVIvN2FK
|
||||||
|
UG1QVWt6U1MvaU1Rd0dXWDhmK2RpZ2dSUXlRClZ5ZGRZRk1vUFp0eVVwVzA5R0Ni
|
||||||
|
RUdjVFh5T3o5ZllFaHVFS1pCWjNzVkEKLS0tIGoxNWhhZUhVSms5cEJEa3lZQWlz
|
||||||
|
aXNBMWhUNFBHTDJUZEtDeU85Z1pPU1kKWNm6Wk+Mbc9QIXMXiwleIvP4hlGLvmpI
|
||||||
|
u+udOAinxTxmB9LOXG+y3iPuS9n0B6Y+4WbTjKm9jEqaqNoW8JypJA==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age1cs8uqj243lspyp042ueu5aes4t3azgyuaxl9au70ggrl2meulq4sgqpc7y
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjTG1MZjM1bVNqeFNqeTgy
|
||||||
|
UXZBWVVacVVsaHJKMkJ1ZWdCbG4zS2tBWDFjCnpSbUw0ZFZMVENNNmYyTWZFdndL
|
||||||
|
RmxUajdsU1l1cmlZa2NQQjJublVsMmMKLS0tIHNpZmRpY2hIbVVZSUdGNHM2WnN6
|
||||||
|
R21jYU96SGVHOUxmZjlldS96K2VqbWcKC28wLdT/zx6yHluCLqB/cFRmc0Alq6AH
|
||||||
|
DqmAaxRhOg/SI5ljCX1gE5BB9rNIJ1Gq8+li7wCpsdfLMr5Yy/HAsw==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2025-11-04T20:25:17Z"
|
||||||
|
mac: ENC[AES256_GCM,data:llS+R5Pj51ZUkU8FkJx2KqqE4D42Uno3Btn31FadIl4kFamnrL6uJjbiNEJpFFO+SchXD3l7VCatbBhMSoxsPYd+rdDRT2klq+iIcZU/k413GC87xdmHIwWE+L2pujv36iBjtM+HJTSvXI0xOxjUmzH4FPdEa1r3Z5yGNnCI+Q4=,iv:ld6pSEzvKTSZtBb+QjHyqqj2VT05YegxBrUR2yxhjKY=,tag:7/vYBh8lDOcVXJL3esTIZQ==,type:str]
|
||||||
|
unencrypted_suffix: _unencrypted
|
||||||
|
version: 3.11.0
|
||||||
@@ -1,30 +1,25 @@
|
|||||||
kopia: ENC[AES256_GCM,data:pZrOFlW5wy8I/leTc1mJEB31Kr0=,iv:COsdcI32ZD66M6h+5L2bBf1N8471LbVEt7TlhYQyMnM=,tag:hJXohn6/zUd5WOSYzXdPwQ==,type:str]
|
kopia: ENC[AES256_GCM,data:pZrOFlW5wy8I/leTc1mJEB31Kr0=,iv:COsdcI32ZD66M6h+5L2bBf1N8471LbVEt7TlhYQyMnM=,tag:hJXohn6/zUd5WOSYzXdPwQ==,type:str]
|
||||||
sops:
|
sops:
|
||||||
kms: []
|
|
||||||
gcp_kms: []
|
|
||||||
azure_kv: []
|
|
||||||
hc_vault: []
|
|
||||||
age:
|
age:
|
||||||
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqN0txU0NKVVprUnlCWGtt
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBpTGZsSS9MWmZFTFdKa0hz
|
||||||
azFFdzJuMHN1MVlWemJIQ0lQRU5TZURpQjEwCkNHcGVaUUtESTZCVVFpa2pxLzF6
|
SENqbHVIMzlPVGkvNThOQ0JHNXZFR1ZkdTFVClNoUVdUVkxEV3NCVXZ1eE5uY2pW
|
||||||
bmZmMVlqRWtvUVNtajNqWTZxNWJWZEEKLS0tIHovT1N1TFgrVjlXYUZSckJ2K1lr
|
TmVLSWRaaVd4ZzdVYnZvekpWK2dUb0kKLS0tIEw5WjVoc1ZBTmluUVhlVmdKc0tB
|
||||||
VWZoTjBWWVl3WjVSMXc5VENPbkJlNXMK1Mi9CDyY/zn090pgGIWmbY5fR/G9fpwm
|
bzZ1U0YydlNMSGExUTRrMWViWXhTekEK51bUaoMKTPTvGeG0vk9tu1TxkbgkNdff
|
||||||
rtl32WdXCcpo8c+XgzYowRw4qxNnNL4gzvGn+91And55eF25Ozl+yA==
|
u9NLDF1LhHOet7iisUOUFjXtZuA/1IFwUlFMKgF7w1PQtoA+G3+X6A==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1gekmz8kc8r2lc2x6d4u63s2lnpmres4hu9wulxh29ch74ud7wfksq56xam
|
- recipient: age1jy7pe4530s8w904wtvrmpxvteztqy5ewdt92a7y3lq87sg9jce5qxxuydt
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwMm1BWUxKdi9VNFNFOUdv
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA1N0JsTmJUTE5Qd3QyaDV5
|
||||||
UTB3ZzdiTVBUKzd1eFZ6ZWxteW1lc3NFOXljClFVUkRqVjVtSTg2aVluNnZNNzdx
|
ejBmMS82OHFSZW9tQTUvbVlKaVY2N0lqVWtZCnAxSnFreklwWG91cFFEelVHZ0pJ
|
||||||
RFVvT1hxUkR3SzU4NXFqbXNYUU5JWk0KLS0tICtFWFQveDB6SnVqNXRXZS9FbU9D
|
eDBYVzVwNHlZZUx0bXZvZm9GdUttR1UKLS0tIHc0N01UNFcxL1V2cHMwVXVkZ3pB
|
||||||
TDhodzYzV3AzWmdjQ0Q5UEJLWTFKT2sKoIz2O7Ot/F+crGjaYvCQRM5iuzMG3L3J
|
dUFCbXZtWjJNL0xEMmRtZjBEdnpLOWsKHaS5RtzjIRyQLEwiTrDaGB/oN6dMi/DL
|
||||||
sjysqAuESLrcUwPX574NwRaOKvlpTaNnKtl7ZXqKnbfucTJPc6o8NQ==
|
Hf0pBSvjLjuAL7YIgfTzLUuHR62PdgH39fhpd3W7LG4/WerXjLS/Lg==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
lastmodified: "2025-03-12T11:54:39Z"
|
lastmodified: "2025-03-12T11:54:39Z"
|
||||||
mac: ENC[AES256_GCM,data:g8nz1Azs5X59ulimMRzgvKz9Y7lKnjFq2SCctdt+yMBLojlk8RXMSf7tY311dZLcd00wi8xsGlBY1XaCbDjIlkG4sLWuQIareYjfqGK5s0pRvELTTF2ZE9yY+5iYdeVkBe7yv44sWJGNN1BcgFpR9zUouA+6yKVt2/XcPu8+7Fs=,iv:zDyECD2w1bTq0xbart+cIjHBAmfSHnpFG5nHPbiT2UY=,tag:b50oQfRgLtI/XbkINuzx5A==,type:str]
|
mac: ENC[AES256_GCM,data:g8nz1Azs5X59ulimMRzgvKz9Y7lKnjFq2SCctdt+yMBLojlk8RXMSf7tY311dZLcd00wi8xsGlBY1XaCbDjIlkG4sLWuQIareYjfqGK5s0pRvELTTF2ZE9yY+5iYdeVkBe7yv44sWJGNN1BcgFpR9zUouA+6yKVt2/XcPu8+7Fs=,iv:zDyECD2w1bTq0xbart+cIjHBAmfSHnpFG5nHPbiT2UY=,tag:b50oQfRgLtI/XbkINuzx5A==,type:str]
|
||||||
pgp: []
|
|
||||||
unencrypted_suffix: _unencrypted
|
unencrypted_suffix: _unencrypted
|
||||||
version: 3.9.4
|
version: 3.9.4
|
||||||
|
|||||||
@@ -1,75 +1,97 @@
|
|||||||
ppetru-password: ENC[AES256_GCM,data:ykxGdbwTLNGKGy7PI/6uLyeWzEyfTo6R7d56m8Lb7kyY6rF0ovDzMGv71ruBA3CwznIp5EaCopvKVXf35xIEyptpQJie++ireQ==,iv:ArWScjeDHp/4DurW+id6PLUiwnMVVwk7iD5S9Bzc8lc=,tag:uErsF74I5D1M86Yl78Gqlw==,type:str]
|
ppetru-password: ENC[AES256_GCM,data:ykxGdbwTLNGKGy7PI/6uLyeWzEyfTo6R7d56m8Lb7kyY6rF0ovDzMGv71ruBA3CwznIp5EaCopvKVXf35xIEyptpQJie++ireQ==,iv:ArWScjeDHp/4DurW+id6PLUiwnMVVwk7iD5S9Bzc8lc=,tag:uErsF74I5D1M86Yl78Gqlw==,type:str]
|
||||||
sops:
|
sops:
|
||||||
kms: []
|
|
||||||
gcp_kms: []
|
|
||||||
azure_kv: []
|
|
||||||
hc_vault: []
|
|
||||||
age:
|
age:
|
||||||
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBDZ0dzYmlHVHRnSjNwUWhI
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB4WExPaEtTdEljYkF1ZUQw
|
||||||
M2ZhTVRtN2ZIb0JacXpaM2hxejFab2tkdTJrCnFaVUpBSGpKUUNzL0xEMUo4Qkg4
|
UHhRNDJZb2wydWVUaXFmR213SjJsNDFKU0FjCnJ3Tk1yZDZkU3orcHZ2UDY3elRi
|
||||||
eWpLL3RRMkovR1AvYklLNXcvZGtrR2cKLS0tIExPN3lPTjFueGlzc3c4UFVjcHVO
|
WW9FMXU0cDNjV3QrOWo3MVB0UzMwakUKLS0tIEhQVldBVWhmR0k0WW9jTE0xc2ZW
|
||||||
Y0N2cFlKSkNSU01SOEN1OXIvRmtQbFEKDGuIvYvMhXWOz9GLIDSs/PEaXpwn3Ust
|
RWp4ZjlVN0FWaURlRHNONDhXdmJpS1EKZVXYyFRFD9KdyWuMoQytkQk4VxpBRyAV
|
||||||
BffIB24x01nPXdz0O+GHC8J4LkvdwRrYL5kX6vqZ/RWOQEpPDpjvFA==
|
lF4FA99wjGMhHFNQExnqYYLYtFkA18/SB6pkneOjdhIvEr0IFLJEqg==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1gtyw202hd07hddac9886as2cs8pm07e4exlnrgfm72lync75ng9qc5fjac
|
- recipient: age1gtyw202hd07hddac9886as2cs8pm07e4exlnrgfm72lync75ng9qc5fjac
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA2TkxMSFJLbzdPTTdYR0hC
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjZ29wdk1aOHZJYWFjaG9v
|
||||||
U1dSVENJckFjVlBkUThrYnRUN2Q5ek9JcFhjCllmVFYzenF6SHByUGtFQUhTZWg2
|
RGxsek95QmtrZS9xRWdKMFdLSHZ3NmlZRGxzCnBvRXZkYnkxdkhJWkY0Ukg1M0dE
|
||||||
UTBLckZpYWQ2QXkxaWMvR2d6eHREYTgKLS0tIC9DMmZ0QXVUMlJ1NVZielV6dWlv
|
dWc3QWtCdkV5Ymd4MkxhZWl0ZDNCZXcKLS0tIGMrVWtNNWtscm9STUN1aHVZc2Ny
|
||||||
QWpybkVtcVhXOEhHRVFNMUJhMXhqSW8KcrPWhqGA8J5zIu5JaBd7N4VjR4iq/6Mq
|
Vm1oaFFTbTBpRWxuR3gxbUZ0YkZieVkKdaSSXrDzAUGkj3w8/YcFZaJTiUUEbJdw
|
||||||
qfi3OPQQlisN6zLzpM1kWs+BTzeAVzfC+UXKmuFeOUHcVJFG6TbLMg==
|
GjuLz7bxX8+HQvhSbu6/KCwG6R4j1eO5Zg1w0wYtyeUOV1HfZEGQog==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age16yqffw4yl5jqvsr7tyd883vn98zw0attuv9g5snc329juff6dy3qw2w5wp
|
- recipient: age16yqffw4yl5jqvsr7tyd883vn98zw0attuv9g5snc329juff6dy3qw2w5wp
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQMWltT2Yvdk96elVqWWI0
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBIMGpibmNRUDRFaFVOTDB4
|
||||||
WE5rR1ZjYXMxYXNiNWdlLzJWMkJObGFENnpVCnVsenJzdUIrc1M2cFJReUExSVU3
|
RVdTc1RrTmRPb0dlZGlpcGxuRlJ1L2w5MVVBCi9HdXNGZmdSaVZsQWRoa2RpVDNV
|
||||||
dWpMUk53dU9UTG9EUlNOTHBja0JqazAKLS0tIGYzU2pxVmpFR3UzaDhCd0ZrdkRj
|
OXBtS0pwYnhjS2hCUk10UUtwam4zMWcKLS0tIFV0dVpQNGpSOEVoZnE5OGpCZkxa
|
||||||
V1V5M2g2elRMR2lYZHM0QVRTdDFBOHcKFIlNxdy6KyZK42qsLgXNIR0lTmNnCOLS
|
MFMxSG95dmJncGJzR29mQkVzNjFIQUEKrJ0MDTBmiwiAaLt7CJ1pjlxuFvZJuRkR
|
||||||
xn0MT+YG6j4YP23OslkjXlr8lEAOggh6+2fFssRXtXZGKdQobQl3Jw==
|
EuLYOYLdVaxgZ442io5OE7wme0P4LLcxSAreDG84GVs67JHvsFE89g==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age14aml5s3sxksa8qthnt6apl3pu6egxyn0cz7pdzzvp2yl6wncad0q56udyj
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBucHdSNGNyRkVITmNDVkpx
|
||||||
|
QVFKK0VucFNSMnNqSGRFRmRoRWpsZ0srUUhrCkwwY2pDSkJ0aGlqc3U3ZXNJUVl0
|
||||||
|
bXZMSVg3bDhaK3d1MTBnL1BQVUhkMUkKLS0tIDdxSk1DMVpsbnI1QlFnNEFJYXRD
|
||||||
|
RTNxYUxlUGxsM1NvekZ4R1hQVE9KMk0KocfE75DTfQMj/RsznOdeF82aO8WwO4HD
|
||||||
|
1xakOM2FHoHi60Q5uOWzfGtz0i+R4ue9hafa5Esn01TOjc3qWSlW3A==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age1me78u46409q9ez6fj0qanrfffc5e9kuq7n7uuvlljfwwc2mdaezqmyzxhx
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYTEhiSDZvZTg3ZWxJRXlu
|
||||||
|
a0ozOXRVL2lia054SkNLc2tEYmlZUCt1NW1JCkorK0hub1pLQTE0QThEUDRDWXJV
|
||||||
|
YWtGamNxMTFIYjVDT2RqTXh0Z2hVTjAKLS0tIGxoRTAwc3FKVVNSQndtbTZmc3BR
|
||||||
|
QnMrK2lMT25tR1ErV2xvS01JWWswVUEKtrGaLETMfY2D8qmgml/fgGxkvQLoiMTP
|
||||||
|
l3a7Y6kwutuzRnmW1tnWv7yoPbTn+BDwfOwBcnesl2x0aJ5iLUKruA==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age1cs8uqj243lspyp042ueu5aes4t3azgyuaxl9au70ggrl2meulq4sgqpc7y
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqMFJ1bzQxWjlZTmF6eEl0
|
||||||
|
d3VVd0VsbGZGdDBKRG9uNEhaMThmWmpuQ1hFClA1aDhwRU1Pb2pibGh6T0pqdmlq
|
||||||
|
S3cxM0wyWWlCL3U5TjV4Vkg4blRsUVkKLS0tIENnYk5GbmZWbFo4cElON1Z0ZVlv
|
||||||
|
ZDdsci9rcG5Wc2V0NlQ3MWx1cFF4dUkKumFT4xtjGDBGK+/SV27Dh/vyGMJAEZNo
|
||||||
|
9gTmVLfR9vXVAXUdOMcqgo7Nl4OJCS4HrDxvVCoER/bVoQVRiPzuXw==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1w5w4wfvtul3sge9mt205zvrkjaeh3qs9gsxhmq7df2g4dztnvv6qylup8z
|
- recipient: age1w5w4wfvtul3sge9mt205zvrkjaeh3qs9gsxhmq7df2g4dztnvv6qylup8z
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzK1FBcHh2NlBCMDVJTVJi
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCM2E5a2lsZGJzRnk5N3Rr
|
||||||
V3JzYmRqVnNxcTBZSWJacDF5NUF0dGJqWWxZCk1aaTdra3RRcklIb1VkU1VpRGlI
|
bWRwRlI2c0c4NzBNdEVqMGZFWTZtNDlvSzJFCmFPM05XbndsazRGWEw3Zy83dldm
|
||||||
VVZNTUFXQzcwT1NRUFFtZTFaZERiOTgKLS0tIFNTbUVXQmRaWmdPWWVzMTJEYk83
|
eXhEZUZQZWk5bVNwaEk5SDRka0NOWjAKLS0tIHNvZ016Rjh5bmYwRUxyRWRydFpI
|
||||||
RGo1aDJJV3RiRkJsTXNoa2ZFSWJNcFUKM21CtHAX2swT++JqKSQ2R9htE0+Csvlz
|
Z0NHYjFzem55bVNORGlVbVNxR2psc2cK6JpNZwznwgl61d/W9g+48/894TQRe4gJ
|
||||||
h/SfoTkVlm8OPrYzaEQV0SB0yxC7jgBKL9X5HZQDaflGbTUBi9LP1A==
|
nl4oDwRPbZwJZgdAKQVfTTujB0QbWpJc24mDGD4I4CydqTKwy6FN3A==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1wwufz86tm3auxn6pn27c47s8rvu7en58rk00nghtaxsdpw0gya6qj6qxdt
|
- recipient: age1wwufz86tm3auxn6pn27c47s8rvu7en58rk00nghtaxsdpw0gya6qj6qxdt
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtRGxmaUE5V1NabytJT3E0
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlK1A1eVdRQThQUHdqbHdk
|
||||||
elZ0T1YybS9OQXdERWdoSkxpbDcvM3Buem1jCnBjSWROT1NxWGxXOERCLy82akJ4
|
b1MyMlBJUFluTm13ZWwwc1RNbThFZUMrNXhzCnRPTVhPSzUzM0VtaUVJbFl5Wllj
|
||||||
clVyVVpZMTI3cTEvT0U5aWorQ21LN0UKLS0tIGJDZGhtUWVVQmpKcnFvNlZvUS9B
|
NUlndzc3Yzhjd1JSb3czajI3UmRDZ1kKLS0tIE03M1hab1MxU0I2VExBWlh2TnJC
|
||||||
STdUQUxXcUNnRmZvNzVIZjlVUGVuWFUKp8qPooDNNFa73mRtmBuzwlccVBX7TF7P
|
eGRXRTlsWmlpenJrVkMxakJZVTV0cE0KMQCKscSLnCu3NsurFFiDaUGjJbyIAwd0
|
||||||
NcQQUzTe5i1B2S5Q8iDVkEKnPJxb10KGJEGGD+gh29beOWsZXEu06g==
|
HTutCiuPYVI4zznQ3RZDBeO5L6a/twXxMRTePUCwOkRNWRWpzR9nxg==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1gekmz8kc8r2lc2x6d4u63s2lnpmres4hu9wulxh29ch74ud7wfksq56xam
|
- recipient: age1jy7pe4530s8w904wtvrmpxvteztqy5ewdt92a7y3lq87sg9jce5qxxuydt
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwVVJQOXlXRVhYMHVZQjdD
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB1VEJmMWlnemFGNExWYUI4
|
||||||
VzRGSUw5L2hRbXdJYkJndUlOb3ZPVWJ1dUcwCkxGLzBLd0RUeWwxc1ZZL2hTYzUz
|
QWRwRktwODNvSmlEcGJORHNlQXNVeXVpbFNrCms0QUFNdDlrNjMxazU1UTcwc2JF
|
||||||
VVBjZVFzN3VCY3o4UXFIT0plSEFoWm8KLS0tIGhJRVRLMVE0eGtkeE82SlMydE1m
|
RC9JUnJsWmgyc01zZU9JQmxuM3V6STQKLS0tIGxQZGFsZ0pNTjQ3QW1sS0E2Y2RM
|
||||||
TDhLOENRREVlemt0ZHBid0RNelV0bUkK0MYZpO5AWieaHnW/tP8bND/bJQYKf85e
|
aVVrNW1BNXQ5UDk1UEtVVWJPNHpwUFUKcArFPFknBj8ss1lD38YtMaB06L/ASeu5
|
||||||
fEs1AE83bhS4pLGhf7elXUW9Yc7YG7M7maPyK9Yf3G8cFH1sYLYhVQ==
|
u4ff0rTDx237snaSFg5RIJ+6uxX16p5ODg3xOYGOMkDeuTLdl2bg3A==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
- recipient: age1zjgqu3zks5kvlw6hvy6ytyygq7n25lu0uj2435zlf30smpxuy4hshpmfer
|
- recipient: age1zjgqu3zks5kvlw6hvy6ytyygq7n25lu0uj2435zlf30smpxuy4hshpmfer
|
||||||
enc: |
|
enc: |
|
||||||
-----BEGIN AGE ENCRYPTED FILE-----
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBIcjlObDQ4eVE1SjJrUlBF
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArWTNkaFlrQkJHRnd4cTBw
|
||||||
YlVyS1FDYThtdDNGSTVReTRidGVPMXRZVGdjCnkvZENzMkFBKzZaU0paOFJkRmMw
|
N3dnTXk3SlJkQkZDdWpLcEpNQ2Z2RHZoVjBJCjBaK1MzbzdaaXluR1dFaFFNaGEx
|
||||||
MWpQaTg0c1RweStNeFVZZ05KY0VDbmcKLS0tIGhjNkxMeDhxVEtLdTF5Qjl1MVJv
|
VTNrVU0yeG9KQkhqUkYxU3VBM0E0R1UKLS0tIDJHek9vVldSZGN0M0c0UHcySGhk
|
||||||
UHZwRmc2NjNDUlJCdWN1V1dhS1RkelEKF1KiZLQvruEAfjwbW8lIyzvcCqeAMReI
|
Z2RoZno4bmhidytlL2ZmNWUzNTcwcVEKXvgaO8Uo0R+Kc8lizLtVxmTi0W5XHjYw
|
||||||
svl1uSaSaxPtCbnc9RA2nfo0vvCoz0a02dhr7CAy3syfQPLLZqRAIA==
|
7evdCHQHmFl0vg/bGOJBmcTUhioJv06D0LR3XMl9I6ufXDNaT/NHxw==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
lastmodified: "2025-03-11T15:54:47Z"
|
lastmodified: "2025-04-04T09:34:06Z"
|
||||||
mac: ENC[AES256_GCM,data:GIHJcwKrRLBhTb3lj9pUza5Fyr9XcKbOMQAe+WETsyr5uHf7lNtlJOXjk1rjBIyJNUJDDnaGSUxCZ213xXIeNBJ92zN54kPheakOiLPOZN7N0YEsU6iENxsuVbQLvvDGvTY5t86DkV6vgClATKj/nqVpkPFAluh2zxLVbBeQrm0=,iv:rF8pesuNU3moerP0+wFuW02A6FYOTMyWWWWr90OB4Zc=,tag:ZXr/FAW37OynDBrGiksLLw==,type:str]
|
mac: ENC[AES256_GCM,data:YIcRrsPparPfPaI2+MLlKsxu7M19H8nndOsrDLuh/5BXzIZNiuTIWyvxODyhI745rDwlibO+7Q0QctanhTl4+IzGaYtuY4i+rb+3dzBMpcdT2VAbtCHHxcltWeanRGFq2K3WM2tbnQCERst5kejfn0Razjq3UU5vNwfBsdJMwGc=,iv:izDxy0ufVnH8ImkZIngcYhGuj0PGpLqBD/ZDvQyE+5I=,tag:oYBUEQS52pr09h5OvOadNg==,type:str]
|
||||||
pgp: []
|
|
||||||
unencrypted_suffix: _unencrypted
|
unencrypted_suffix: _unencrypted
|
||||||
version: 3.9.4
|
version: 3.9.4
|
||||||
|
|||||||
25
secrets/sparky.yaml
Normal file
25
secrets/sparky.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
kopia: ENC[AES256_GCM,data:AS5zTDpPPuPGEoT05uHyAfPTbls=,iv:YZK8O0/osP0/ay1tw2kkiCoxws+DlzquVqXNdVayE+k=,tag:tCNM8fzEEuRTPDJybq7fUA==,type:str]
|
||||||
|
sops:
|
||||||
|
age:
|
||||||
|
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtSjhXazlWd3YwNVFKVkw4
|
||||||
|
dDMydVFCN1lLeUJOWkxuSGJ1a0srNm9PaWswCm8yZ3hiOWFHUlAzNVRrck53OElD
|
||||||
|
b056YmV4S2NtNnEzRkpnRVNEblV5blkKLS0tIG1ramoya3RHV1FJZGlFU2ZSeUtS
|
||||||
|
KzJlbEsvYWlXaHhEQU5oOS9HaDdYSDAKvlhKgi4Pf8xVB5MnO33GWYg313mRdUGu
|
||||||
|
kFCs5b1N96x9JOS7zgnM0AKDY8IPBSe33tmDqtYygwPdkOys1PmZkw==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age14aml5s3sxksa8qthnt6apl3pu6egxyn0cz7pdzzvp2yl6wncad0q56udyj
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYTVovQld2RkRxaW90b3lR
|
||||||
|
NGFtbWVLZUNHdnlZVWkrL1RXUHBVeGdvSDJrClJmSmZRZmdjcy8rNnJBVmVUWDZq
|
||||||
|
M2lPbDBhT0Y0NkJ5a1FNYnU3Zkl0TkEKLS0tIGxqM2h2TDB2akl4ODlYY042R1Z4
|
||||||
|
ZVJWN3pZelFJR0Jid3JseEZKVFZtYmsKmKXQRjnghuF/s9z2Xk98sFvxic91fGa2
|
||||||
|
V7IGmpqAYQV3jJ1G4cjJxtpidQ6fLCqlnR+sq+y8+dT+LN7i+Zbnnw==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2025-10-19T17:33:13Z"
|
||||||
|
mac: ENC[AES256_GCM,data:IwEyBr/I7BJa0gWZ494dCT0ogyP2PbnUg5fLOn15vZAHIyYtTB3dI3gV5Lx7oPdqOPlI61MsShIYBnk0uBChpNu6O4oiGUfwvBfegzlDyHHERLx+S7nZpcwmf/3JoNXwq0f2OtOu8nA6Q1V4gVjFFNWUCAh5cq106vG1awsQkn0=,iv:j+JcVtKz2RfyWu55dUeJJTRK6prB9DGLvcjiAAdVySM=,tag:Pg5sKiLzYUFoN9Duu+nF0w==,type:str]
|
||||||
|
unencrypted_suffix: _unencrypted
|
||||||
|
version: 3.9.4
|
||||||
25
secrets/stinky.yaml
Normal file
25
secrets/stinky.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
kopia: ENC[AES256_GCM,data:boi8V0Kn,iv:Kwe1hn44DJe9dpv8jVrJjwyblVouakuCdnEK9uotTkY=,tag:B5hrpRBP17kFVn4iy5TOlA==,type:str]
|
||||||
|
sops:
|
||||||
|
age:
|
||||||
|
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA4aXRBWUg3QU12UWNBNXI4
|
||||||
|
RS91amdnOXhibXpiYjgrQ2FwR3J0VER1cEdjCnlaTXFSMzFPNmlnSTIvejJuMUFU
|
||||||
|
T2lUSDRQeFVtT1ZHb2xNZVNpWDJOTmsKLS0tIElHK3FVbUFSREMwcVN5V0tPSEtt
|
||||||
|
a2ZyNXFnZzBkeWZsU2docUxzQVMyUFkKkiEW3ovgVBLlBEHyx6hSXVp8PTeZ+2PL
|
||||||
|
kzW8AnQTi714iQqyN3NlkJ8r+1doGBr9U492KXpjdt1woY4MwMvWkA==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age1me78u46409q9ez6fj0qanrfffc5e9kuq7n7uuvlljfwwc2mdaezqmyzxhx
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCN0Fvdm5Mc3lqNDh6VEp5
|
||||||
|
Mk83N0dNU2pnWVFRODl5MllIaHdIUm5SclJJCjNPQVBGOGRIRlVVL1lzYU5ocjRx
|
||||||
|
Sis5VzZXN3AxYUpxNm9pZE5WUzZGaTQKLS0tIFlpL09vQmpDWjNJeXg5dUZZSm5O
|
||||||
|
S2ZOdExQdzJRcGdmUWtrRUpmVy9lY3cKJwEoO9WltW2FIFEylGuWBHwSJlnAIy8M
|
||||||
|
FFCmmApdkzJLwvQGg5kNC/4Xx34ZfNTTpePxh9qP0ASxUQASZo2urA==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2025-10-26T16:59:40Z"
|
||||||
|
mac: ENC[AES256_GCM,data:FlSv9PIcmX+oJNVaUpXIG2thzUvEb7bMGDOvIRgAFVzoUipIes0qdbU0R/pqogW0NpgbXNLhNBmemKfheGusngatJmbNwHT9Hqo7a82U9j1G302sziqrcz1pOxG79oacFEM+coWpXGgmMXYeNlQEihUvvvUt810VWBb3Hjba80g=,iv:6gSTUd2y9YxiOCzwQ/udLN46lgfwgWDgfSTOpaJpPmY=,tag:q/Ta6fejjKMg0TmZhNmy8Q==,type:str]
|
||||||
|
unencrypted_suffix: _unencrypted
|
||||||
|
version: 3.11.0
|
||||||
25
secrets/wifi.yaml
Normal file
25
secrets/wifi.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
wifi-password-pi: ENC[AES256_GCM,data:n5ZfyhBCrHx98uUxFQ==,iv:9SQHcIw252GeS0IxON3ThqOk02Wtlfu/Df6KMLAmokw=,tag:F5pAHlInkqVUQDg8MPnMgQ==,type:str]
|
||||||
|
sops:
|
||||||
|
age:
|
||||||
|
- recipient: age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBUd1lyZG9GVHBZZHU0Wkl5
|
||||||
|
RFJ2NUdtUFRUbmd3aTRFV2dGaVA2S3RWOGk0CmlLV2ZYdERvb21iT0dlUk42TW5S
|
||||||
|
LzdxVlA1U1FpWkxIb1pMeUtRRm9NdFkKLS0tIGszaFM0dkhHeWZUcXc1dlo3SDBX
|
||||||
|
WjltV282VlJtTlBCRmdzOU16R0x5UUUKBTFArSUNWtq7r+HduxT0ChvYfjo8HtbG
|
||||||
|
KeYBoB9QwY5wNRMlk0AIlJVNLKW8A2tC9T8ehbtjol13H7PQK+wsQQ==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
- recipient: age1me78u46409q9ez6fj0qanrfffc5e9kuq7n7uuvlljfwwc2mdaezqmyzxhx
|
||||||
|
enc: |
|
||||||
|
-----BEGIN AGE ENCRYPTED FILE-----
|
||||||
|
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB4THVFa1p5c2l5V0pKckVC
|
||||||
|
YUdYbitJbUpjclAydG4yekxhbXdzeDNpbXdRCnRCZVI1cWJiQi9TdkR3Y0E5TklO
|
||||||
|
T2dHYXFKeW9KSkdXOWFnbWVRQUZOL28KLS0tIDVMVldvd0NWcU5QWkhDTTBmUTla
|
||||||
|
aUs0dTB3Y3RXTlBCOCtYSHdOMUYxdTgKQShxsJ+3EQU18uixmM3FlCe5C9Rl3oS5
|
||||||
|
gwZIrh0amSzX3f9SOjf42h1d+IDL/DMWAlSA/3XMx8TK9A1zKZDgVA==
|
||||||
|
-----END AGE ENCRYPTED FILE-----
|
||||||
|
lastmodified: "2025-10-29T14:55:56Z"
|
||||||
|
mac: ENC[AES256_GCM,data:2zTEzx8UxOMHIytiufCHS/B1ci7kI05+SIE8ziMY8/ItoAYtt0zXXEgRWs0NLVb3P2vXMOhnXG4qO1o20UXt6Wqq9j1zXPVaQTQie4QSPDdX/8OXXi87Ggm3WQyeA1IfABacfL0D6XkNvxfMHGvMrnhYltPPgYDuNlgjnnjTm8o=,iv:FJMKLSlAenvSNUH6OmeGIR7f9Bzl3NwqaUaokoHEj50=,tag:WU4BnlLu5cKSbtiYL0mNKg==,type:str]
|
||||||
|
unencrypted_suffix: _unencrypted
|
||||||
|
version: 3.11.0
|
||||||
@@ -1,4 +1,5 @@
|
|||||||
kopia: ENC[AES256_GCM,data:MtzeNkkIwMnImZBx0mrpFVwkNXk=,iv:1iRQTyJgF1vEchOwFxv7qLte8lhrM+16cldUlMwyprQ=,tag:Bz/jLj9iGOgALPmvWe48pw==,type:str]
|
kopia: ENC[AES256_GCM,data:MtzeNkkIwMnImZBx0mrpFVwkNXk=,iv:1iRQTyJgF1vEchOwFxv7qLte8lhrM+16cldUlMwyprQ=,tag:Bz/jLj9iGOgALPmvWe48pw==,type:str]
|
||||||
|
lighthouse_jwt: ENC[AES256_GCM,data:gosbFx1lpJUA4OAIVzi1lV3NhEVJNBF8Bvt6QW3+QobeNx4jrHbWKycYG9e7ig6IcePbFpirmqwhbs81FWjlSA==,iv:j2IKvWNp6+bkWta7q0PcQHNca0TMk1+5qtGJA5fULnU=,tag:Pk6IobanGpl2Fz13EsxAXg==,type:str]
|
||||||
sops:
|
sops:
|
||||||
kms: []
|
kms: []
|
||||||
gcp_kms: []
|
gcp_kms: []
|
||||||
@@ -23,8 +24,8 @@ sops:
|
|||||||
MzlQcFpSMTVTRXplSTN5WllsOTM4S00KRgnKz0cA/fMueZzFJ7VCs2jrQ29rn9sO
|
MzlQcFpSMTVTRXplSTN5WllsOTM4S00KRgnKz0cA/fMueZzFJ7VCs2jrQ29rn9sO
|
||||||
kE/8FyD1YBR/+I3qUYfRvlKAKlSrI2Mb3tlRSaSw5te3Dbqh5+tN7Q==
|
kE/8FyD1YBR/+I3qUYfRvlKAKlSrI2Mb3tlRSaSw5te3Dbqh5+tN7Q==
|
||||||
-----END AGE ENCRYPTED FILE-----
|
-----END AGE ENCRYPTED FILE-----
|
||||||
lastmodified: "2025-03-12T22:11:51Z"
|
lastmodified: "2025-04-04T09:34:11Z"
|
||||||
mac: ENC[AES256_GCM,data:hlZcDq5MHF+LvWPx170QWadDFndBQ1VMNZtt6ySaLXqoetQQGDQsRCpFMd2aktwkUYVg5Opoyv+2VyvFQNjogh+j7u0mQCpTksqdJ02rgKa2zdaigpyl3wvHFbTXIx0t3aTN8ZZJBQJpo9e2b4LDJWwLi065uhspMrIjURlTwjg=,iv:NjFE6PnMjaSc/tvgqES8kbDtDI8LwPEIQU9K3wdnI8Q=,tag:BLJQAbnwwIm6CNs6BIK/tQ==,type:str]
|
mac: ENC[AES256_GCM,data:iE8FwepvNR//w/9X1nklaoOmO5ICG5Sym/1IcKsZPJiMcdxN+T6Vrgp9+I1Fmn+y7KD06iwG8cQ2IJf7wO5KplzRyCyol8fGNsh4KiiGU52MJLOzVDC4XxcRDNpxi1abrfm7xuxt1v8iL6+FIAyxtpd5QCXIihn2XnPpZymSf0Q=,iv:qZ9jFy9mIbT3GLBtKrrgz8HhjpYz7rMviyJ1PP38y6c=,tag:y+AymYMmQRIqsziqlEoR7w==,type:str]
|
||||||
pgp: []
|
pgp: []
|
||||||
unencrypted_suffix: _unencrypted
|
unencrypted_suffix: _unencrypted
|
||||||
version: 3.9.4
|
version: 3.9.4
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ job "adminer" {
|
|||||||
tags = [
|
tags = [
|
||||||
"traefik.enable=true",
|
"traefik.enable=true",
|
||||||
"traefik.http.routers.adminer.entryPoints=websecure",
|
"traefik.http.routers.adminer.entryPoints=websecure",
|
||||||
"traefik.http.routers.adminer.middlewares=authentik@file",
|
"traefik.http.routers.adminer.middlewares=oidc-auth@file",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,118 +0,0 @@
|
|||||||
job "authentik" {
|
|
||||||
datacenters = ["alo"]
|
|
||||||
|
|
||||||
group "auth" {
|
|
||||||
network {
|
|
||||||
port "http" {
|
|
||||||
# traefik forwardAuth hardcodes this port
|
|
||||||
static = 9000
|
|
||||||
}
|
|
||||||
port "https" {
|
|
||||||
to = 9443
|
|
||||||
}
|
|
||||||
port "metrics" {
|
|
||||||
to = 9300
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
task "server" {
|
|
||||||
driver = "docker"
|
|
||||||
|
|
||||||
config {
|
|
||||||
image = "ghcr.io/goauthentik/server:${var.authentik_version}"
|
|
||||||
ports = [
|
|
||||||
"http",
|
|
||||||
"https",
|
|
||||||
"metrics"
|
|
||||||
]
|
|
||||||
command = "server"
|
|
||||||
}
|
|
||||||
|
|
||||||
env {
|
|
||||||
AUTHENTIK_REDIS__HOST = "redis.service.consul"
|
|
||||||
AUTHENTIK_POSTGRESQL__HOST = "postgres.service.consul"
|
|
||||||
AUTHENTIK_POSTGRESQL__NAME = "${var.pg_db}"
|
|
||||||
AUTHENTIK_POSTGRESQL__USER = "${var.pg_user}"
|
|
||||||
AUTHENTIK_POSTGRESQL__PASSWORD = "${var.pg_password}"
|
|
||||||
AUTHENTIK_SECRET_KEY = "${var.secret_key}"
|
|
||||||
AUTHENTIK_EMAIL__HOST = "192.168.1.1"
|
|
||||||
AUTHENTIK_EMAIL__FROM = "authentik@paler.net"
|
|
||||||
}
|
|
||||||
|
|
||||||
resources {
|
|
||||||
cpu = 2000
|
|
||||||
memory = 1024
|
|
||||||
}
|
|
||||||
|
|
||||||
service {
|
|
||||||
name = "authentik"
|
|
||||||
port = "http"
|
|
||||||
tags = [
|
|
||||||
"traefik.enable=true",
|
|
||||||
# Main UI
|
|
||||||
"traefik.http.routers.authentik.entryPoints=websecure",
|
|
||||||
"traefik.http.routers.authentik.rule=Host(`authentik.v.paler.net`) || Host(`authentik.alo.land`)",
|
|
||||||
# Embedded outpost for forward auth
|
|
||||||
"traefik.http.routers.authentik-palernet.entryPoints=websecure",
|
|
||||||
"traefik.http.routers.authentik-palernet.rule=HostRegexp(`{subdomain:[a-z0-9-]+}.v.paler.net`) && PathPrefix(`/outpost.goauthentik.io/`)",
|
|
||||||
"traefik.http.routers.authentik-aloland.entryPoints=websecure",
|
|
||||||
"traefik.http.routers.authentik-aloland.rule=HostRegexp(`{subdomain:[a-z0-9-]+}.alo.land`) && PathPrefix(`/outpost.goauthentik.io/`)",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
service {
|
|
||||||
name = "authentik-metrics"
|
|
||||||
port = "metrics"
|
|
||||||
tags = [ "metrics" ]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
task "worker" {
|
|
||||||
driver = "docker"
|
|
||||||
|
|
||||||
config {
|
|
||||||
image = "ghcr.io/goauthentik/server:${var.authentik_version}"
|
|
||||||
command = "worker"
|
|
||||||
}
|
|
||||||
|
|
||||||
env {
|
|
||||||
AUTHENTIK_REDIS__HOST = "redis.service.consul"
|
|
||||||
AUTHENTIK_POSTGRESQL__HOST = "postgres.service.consul"
|
|
||||||
AUTHENTIK_POSTGRESQL__NAME = "${var.pg_db}"
|
|
||||||
AUTHENTIK_POSTGRESQL__USER = "${var.pg_user}"
|
|
||||||
AUTHENTIK_POSTGRESQL__PASSWORD = "${var.pg_password}"
|
|
||||||
AUTHENTIK_SECRET_KEY = "${var.secret_key}"
|
|
||||||
AUTHENTIK_EMAIL__HOST = "192.168.1.1"
|
|
||||||
AUTHENTIK_EMAIL__FROM = "authentik@paler.net"
|
|
||||||
}
|
|
||||||
|
|
||||||
resources {
|
|
||||||
memory = 600
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "pg_user" {
|
|
||||||
type = string
|
|
||||||
default = "authentik"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "pg_password" {
|
|
||||||
type = string
|
|
||||||
default = "aQueiquuo6aiyah5eoch"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "pg_db" {
|
|
||||||
type = string
|
|
||||||
default = "authentik"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "secret_key" {
|
|
||||||
type = string
|
|
||||||
default = "uUzCYhGV93Z8wKLAScuGFqBskxyzSfG4cz6bnXq6McM67Ho7p9"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "authentik_version" {
|
|
||||||
type = string
|
|
||||||
default = "2024.12.3"
|
|
||||||
}
|
|
||||||
@@ -22,7 +22,7 @@ job "beancount" {
|
|||||||
image = "gitea.v.paler.net/ppetru/fava:latest"
|
image = "gitea.v.paler.net/ppetru/fava:latest"
|
||||||
ports = ["http"]
|
ports = ["http"]
|
||||||
volumes = [
|
volumes = [
|
||||||
"/data/compute/appdata/beancount:/beancount",
|
"/data/services/beancount:/beancount",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -37,7 +37,7 @@ job "beancount" {
|
|||||||
tags = [
|
tags = [
|
||||||
"traefik.enable=true",
|
"traefik.enable=true",
|
||||||
"traefik.http.routers.finances.entryPoints=websecure",
|
"traefik.http.routers.finances.entryPoints=websecure",
|
||||||
"traefik.http.routers.finances.middlewares=authentik@file",
|
"traefik.http.routers.finances.middlewares=oidc-auth@file",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,13 @@ job "clickhouse" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
group "db" {
|
group "db" {
|
||||||
|
# Run on primary storage node (zippy) for local disk performance
|
||||||
|
# TODO: move to fractal once it's converted to NixOS (spinning disks OK for time-series data)
|
||||||
|
constraint {
|
||||||
|
attribute = "${meta.storage_role}"
|
||||||
|
value = "primary"
|
||||||
|
}
|
||||||
|
|
||||||
network {
|
network {
|
||||||
port "clickhouse" {
|
port "clickhouse" {
|
||||||
static = 8123
|
static = 8123
|
||||||
@@ -16,9 +23,9 @@ job "clickhouse" {
|
|||||||
driver = "docker"
|
driver = "docker"
|
||||||
|
|
||||||
config {
|
config {
|
||||||
image = "clickhouse/clickhouse-server:25.2-alpine"
|
image = "clickhouse/clickhouse-server:25.9"
|
||||||
volumes = [
|
volumes = [
|
||||||
"/data/compute/appdata/clickhouse:/var/lib/clickhouse",
|
"/data/services/clickhouse:/var/lib/clickhouse",
|
||||||
"local/clickhouse-config.xml:/etc/clickhouse-server/config.d/logging.xml:ro",
|
"local/clickhouse-config.xml:/etc/clickhouse-server/config.d/logging.xml:ro",
|
||||||
"local/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro",
|
"local/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro",
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -1,102 +0,0 @@
|
|||||||
job "couchdb" {
|
|
||||||
datacenters = ["alo"]
|
|
||||||
|
|
||||||
meta {
|
|
||||||
uuid = uuidv4()
|
|
||||||
}
|
|
||||||
|
|
||||||
group "db" {
|
|
||||||
network {
|
|
||||||
port "api" {
|
|
||||||
to = 5984
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
task "server" {
|
|
||||||
driver = "docker"
|
|
||||||
|
|
||||||
config {
|
|
||||||
image = "couchdb:3.3"
|
|
||||||
ports = ["api"]
|
|
||||||
volumes = [
|
|
||||||
"/data/compute/appdata/couchdb:/opt/couchdb/data",
|
|
||||||
"local/couchdb.ini:/opt/couchdb/etc/local.d/local.ini",
|
|
||||||
"local/vm.args:/opt/couchdb/etc/vm.args",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
service {
|
|
||||||
name = "couchdb"
|
|
||||||
port = "api"
|
|
||||||
|
|
||||||
tags = [
|
|
||||||
"traefik.enable=true",
|
|
||||||
"traefik.http.routers.couchdb.entryPoints=websecure",
|
|
||||||
"traefik.http.routers.couchdb.rule=Host(`pidb.paler.net`)",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
resources {
|
|
||||||
memory = 2000
|
|
||||||
}
|
|
||||||
|
|
||||||
template {
|
|
||||||
data = <<EOH
|
|
||||||
# (Debian) Package-introduced administrative user
|
|
||||||
[admins]
|
|
||||||
admin = -pbkdf2-eeb3e20eb9b58edec62d10987d7aed3465c425d4,3cf6e90591d435fbfa9262693490b9c8,10
|
|
||||||
|
|
||||||
[couchdb]
|
|
||||||
uuid = 66ab957b6c21d9fd2ff6bda36da9f4b7
|
|
||||||
|
|
||||||
[couch_httpd_auth]
|
|
||||||
secret = a57bfaa045b960c301411bb0893d88ac
|
|
||||||
allow_persistent_cookies = true
|
|
||||||
; 8 weeks
|
|
||||||
timeout = 4838400
|
|
||||||
|
|
||||||
[cors]
|
|
||||||
origins = https://pi.paler.net,https://noteself.org
|
|
||||||
credentials = true
|
|
||||||
headers = accept, authorization, content-type, origin, referer
|
|
||||||
methods = GET, PUT, POST, HEAD, DELETE
|
|
||||||
|
|
||||||
[httpd]
|
|
||||||
enable_cors = true
|
|
||||||
EOH
|
|
||||||
destination = "local/couchdb.ini"
|
|
||||||
}
|
|
||||||
|
|
||||||
template {
|
|
||||||
data = <<EOH
|
|
||||||
-name couchdb@127.0.0.1
|
|
||||||
|
|
||||||
# All nodes must share the same magic cookie for distributed Erlang to work.
|
|
||||||
# Comment out this line if you synchronized the cookies by other means (using
|
|
||||||
# the ~/.erlang.cookie file, for example).
|
|
||||||
#-setcookie monster
|
|
||||||
|
|
||||||
# Tell kernel and SASL not to log anything
|
|
||||||
-kernel error_logger silent
|
|
||||||
-sasl sasl_error_logger false
|
|
||||||
|
|
||||||
# Use kernel poll functionality if supported by emulator
|
|
||||||
+K true
|
|
||||||
|
|
||||||
# Start a pool of asynchronous IO threads
|
|
||||||
+A 16
|
|
||||||
|
|
||||||
# Comment this line out to enable the interactive Erlang shell on startup
|
|
||||||
+Bd -noinput
|
|
||||||
|
|
||||||
# Force use of the smp scheduler, fixes #1296
|
|
||||||
-smp enable
|
|
||||||
|
|
||||||
# Set maximum SSL session lifetime to reap terminated replication readers
|
|
||||||
-ssl session_lifetime 300
|
|
||||||
EOH
|
|
||||||
destination = "local/vm.args"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -32,8 +32,8 @@ job "evcc" {
|
|||||||
]
|
]
|
||||||
|
|
||||||
volumes = [
|
volumes = [
|
||||||
"/data/compute/appdata/evcc/evcc.yaml:/etc/evcc.yaml",
|
"/data/services/evcc/evcc.yaml:/etc/evcc.yaml",
|
||||||
"/data/compute/appdata/evcc/evcc:/root/.evcc",
|
"/data/services/evcc/evcc:/root/.evcc",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -49,7 +49,7 @@ job "evcc" {
|
|||||||
tags = [
|
tags = [
|
||||||
"traefik.enable=true",
|
"traefik.enable=true",
|
||||||
"traefik.http.routers.evcc.entryPoints=websecure",
|
"traefik.http.routers.evcc.entryPoints=websecure",
|
||||||
"traefik.http.routers.evcc.middlewares=authentik@file",
|
"traefik.http.routers.evcc.middlewares=oidc-auth@file",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,8 +19,8 @@ job "farmos" {
|
|||||||
image = "gitea.v.paler.net/ppetru/farmos:latest"
|
image = "gitea.v.paler.net/ppetru/farmos:latest"
|
||||||
ports = ["http"]
|
ports = ["http"]
|
||||||
volumes = [
|
volumes = [
|
||||||
"/data/compute/appdata/farmos/sites:/opt/drupal/web/sites",
|
"/data/services/farmos/sites:/opt/drupal/web/sites",
|
||||||
"/data/compute/appdata/farmos/keys:/opt/drupal/keys",
|
"/data/services/farmos/keys:/opt/drupal/keys",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,69 +0,0 @@
|
|||||||
job "ghost" {
|
|
||||||
datacenters = ["alo"]
|
|
||||||
|
|
||||||
meta {
|
|
||||||
uuid = uuidv4()
|
|
||||||
}
|
|
||||||
|
|
||||||
group "web" {
|
|
||||||
network {
|
|
||||||
port "http" {
|
|
||||||
to = 2368
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
task "server" {
|
|
||||||
driver = "docker"
|
|
||||||
|
|
||||||
config {
|
|
||||||
image = "ghost:latest"
|
|
||||||
ports = ["http"]
|
|
||||||
volumes = [
|
|
||||||
"/data/compute/appdata/ghost:/var/lib/ghost/content",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
env {
|
|
||||||
url = "https://alo.land"
|
|
||||||
mail__transport = "SMTP"
|
|
||||||
mail__options__service = "Mailgun"
|
|
||||||
mail__options__host = "smtp.eu.mailgun.org"
|
|
||||||
mail__options__port = "465"
|
|
||||||
mail__options__secure = "true"
|
|
||||||
mail__options__auth__user = "postmaster@mg.alo.land"
|
|
||||||
mail__options__auth__pass = "63eb13eabe77f639cbde3d14793f42ef-602cc1bf-36f99a0c"
|
|
||||||
database__connection__host = "mysql.service.consul"
|
|
||||||
database__connection__database = "alo"
|
|
||||||
database__connection__user = "ghost"
|
|
||||||
database__connection__password = "cohNeiveoGa9eedohFie"
|
|
||||||
}
|
|
||||||
|
|
||||||
service {
|
|
||||||
name = "ghost"
|
|
||||||
port = "http"
|
|
||||||
check {
|
|
||||||
type = "http"
|
|
||||||
port = "http"
|
|
||||||
path = "/ghost/api/admin/site/"
|
|
||||||
header {
|
|
||||||
X-Forwarded-Proto = ["https"]
|
|
||||||
Host = ["alo.land"]
|
|
||||||
}
|
|
||||||
interval = "30s"
|
|
||||||
timeout = "2s"
|
|
||||||
}
|
|
||||||
|
|
||||||
tags = [
|
|
||||||
"traefik.enable=true",
|
|
||||||
"traefik.http.routers.ghost.entryPoints=websecure",
|
|
||||||
"traefik.http.routers.ghost.rule=Host(`alo.land`)",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
resources {
|
|
||||||
cpu = 500
|
|
||||||
memory = 512
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -25,8 +25,8 @@ job "gitea" {
|
|||||||
"ssh",
|
"ssh",
|
||||||
]
|
]
|
||||||
volumes = [
|
volumes = [
|
||||||
"/data/compute/appdata/gitea/data:/var/lib/gitea",
|
"/data/services/gitea/data:/var/lib/gitea",
|
||||||
"/data/compute/appdata/gitea/config:/etc/gitea",
|
"/data/services/gitea/config:/etc/gitea",
|
||||||
"/etc/timezone:/etc/timezone:ro",
|
"/etc/timezone:/etc/timezone:ro",
|
||||||
"/etc/localtime:/etc/localtime:ro",
|
"/etc/localtime:/etc/localtime:ro",
|
||||||
]
|
]
|
||||||
@@ -51,6 +51,7 @@ job "gitea" {
|
|||||||
tags = [
|
tags = [
|
||||||
"traefik.enable=true",
|
"traefik.enable=true",
|
||||||
"traefik.http.routers.gitea.entryPoints=websecure",
|
"traefik.http.routers.gitea.entryPoints=websecure",
|
||||||
|
"traefik.http.services.gitea.loadBalancer.serversTransport=gitea-transport@file",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ job "grafana" {
|
|||||||
config {
|
config {
|
||||||
image = "grafana/grafana-enterprise:latest"
|
image = "grafana/grafana-enterprise:latest"
|
||||||
ports = [ "http" ]
|
ports = [ "http" ]
|
||||||
volumes = [ "/data/compute/appdata/grafana:/var/lib/grafana" ]
|
volumes = [ "/data/services/grafana:/var/lib/grafana" ]
|
||||||
}
|
}
|
||||||
|
|
||||||
env {
|
env {
|
||||||
@@ -25,19 +25,22 @@ job "grafana" {
|
|||||||
GF_SERVER_ROOT_URL = "https://grafana.v.paler.net"
|
GF_SERVER_ROOT_URL = "https://grafana.v.paler.net"
|
||||||
GF_AUTH_BASIC_ENABLED = "false"
|
GF_AUTH_BASIC_ENABLED = "false"
|
||||||
GF_AUTH_GENERIC_OAUTH_ENABLED = "true"
|
GF_AUTH_GENERIC_OAUTH_ENABLED = "true"
|
||||||
GF_AUTH_GENERIC_OAUTH_NAME = "authentik"
|
GF_AUTH_GENERIC_OAUTH_NAME = "Pocket ID"
|
||||||
GF_AUTH_GENERIC_OAUTH_CLIENT_ID = "E78NG1AZeW6FaAox0mUhaTSrHeqFgNkWG12My2zx"
|
GF_AUTH_GENERIC_OAUTH_CLIENT_ID = "99e44cf2-ecc6-4e82-8882-129c017f8a4a"
|
||||||
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET = "N7u2RfFZ5KVLdEkhlpUTzymGxeK5rLo9SYZLSGGBXJDr46p5g5uv1qZ4Jm2d1rP4aJX4PSzauZlxHhkG2byiBFMbdo6K742KXcEimZsOBFiNKeWOHxofYerBnPuoECQW"
|
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET = "NjJ9Uro4MK7siqLGSmkiQmjFuESulqQN"
|
||||||
GF_AUTH_GENERIC_OAUTH_SCOPES = "openid profile email offline_access"
|
GF_AUTH_GENERIC_OAUTH_SCOPES = "openid profile email groups"
|
||||||
GF_AUTH_GENERIC_OAUTH_AUTH_URL = "https://authentik.v.paler.net/application/o/authorize/"
|
GF_AUTH_GENERIC_OAUTH_AUTH_URL = "https://pocket-id.v.paler.net/authorize"
|
||||||
GF_AUTH_GENERIC_OAUTH_TOKEN_URL = "https://authentik.v.paler.net/application/o/token/"
|
GF_AUTH_GENERIC_OAUTH_TOKEN_URL = "https://pocket-id.v.paler.net/api/oidc/token"
|
||||||
GF_AUTH_GENERIC_OAUTH_API_URL = "https://authentik.v.paler.net/application/o/userinfo/"
|
GF_AUTH_GENERIC_OAUTH_API_URL = "https://pocket-id.v.paler.net/api/oidc/userinfo"
|
||||||
GF_AUTH_SIGNOUT_REDIRECT_URL = "https://authentik.v.paler.net/application/o/grafana/end-session/"
|
GF_AUTH_SIGNOUT_REDIRECT_URL = "https://pocket-id.v.paler.net/logout"
|
||||||
# Optionally enable auto-login (bypasses Grafana login screen)
|
# Optionally enable auto-login (bypasses Grafana login screen)
|
||||||
GF_AUTH_OAUTH_AUTO_LOGIN = "true"
|
GF_AUTH_OAUTH_AUTO_LOGIN = "true"
|
||||||
# Optionally map user groups to Grafana roles
|
# Optionally map user groups to Grafana roles
|
||||||
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH = "contains(groups[*], 'Grafana Admins') && 'Admin' || contains(groups[*], 'Grafana Editors') && 'Editor' || 'Viewer'"
|
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH = "contains(groups[*], 'admins') && 'Admin' || contains(groups[*], 'residents') && 'Editor' || 'Viewer'"
|
||||||
GF_AUTH_GENERIC_OAUTH_USE_REFRESH_TOKEN = "true"
|
GF_AUTH_GENERIC_OAUTH_USE_REFRESH_TOKEN = "true"
|
||||||
|
GF_AUTH_GENERIC_OAUTH_EMAIL_ATTRIBUTE_PATH = "email"
|
||||||
|
GF_AUTH_GENERIC_OAUTH_LOGIN_ATTRIBUTE_PATH = "preferred_username"
|
||||||
|
GF_AUTH_GENERIC_OAUTH_NAME_ATTRIBUTE_PATH = "name"
|
||||||
#GF_LOG_LEVEL = "debug"
|
#GF_LOG_LEVEL = "debug"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
50
services/homepage.hcl
Normal file
50
services/homepage.hcl
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
job "homepage" {
|
||||||
|
datacenters = ["alo"]
|
||||||
|
|
||||||
|
group "app" {
|
||||||
|
network {
|
||||||
|
port "http" { to = 3000 }
|
||||||
|
}
|
||||||
|
|
||||||
|
task "homepage" {
|
||||||
|
driver = "docker"
|
||||||
|
|
||||||
|
config {
|
||||||
|
image = "ghcr.io/gethomepage/homepage:latest"
|
||||||
|
ports = [ "http" ]
|
||||||
|
volumes = [
|
||||||
|
"/data/services/homepage:/app/config",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
env {
|
||||||
|
PUID = 1000
|
||||||
|
PGID = 1000
|
||||||
|
HOMEPAGE_ALLOWED_HOSTS = "homepage.v.paler.net"
|
||||||
|
}
|
||||||
|
|
||||||
|
resources {
|
||||||
|
cpu = 200
|
||||||
|
memory = 256
|
||||||
|
}
|
||||||
|
|
||||||
|
service {
|
||||||
|
name = "homepage"
|
||||||
|
port = "http"
|
||||||
|
|
||||||
|
tags = [
|
||||||
|
"traefik.enable=true",
|
||||||
|
"traefik.http.routers.homepage.entryPoints=websecure",
|
||||||
|
"traefik.http.routers.homepage.middlewares=oidc-auth@file",
|
||||||
|
]
|
||||||
|
|
||||||
|
check {
|
||||||
|
type = "http"
|
||||||
|
path = "/"
|
||||||
|
interval = "10s"
|
||||||
|
timeout = "5s"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user