Compare commits
151 Commits
f918ff5df2...master
| SHA1 | Author | Date | |
|---|---|---|---|
| 3016301729 | |||
| 027a9c675d | |||
| 14d267e12d | |||
| 2e8e11ecec | |||
| f90fa5c23b | |||
| caa6d0aafd | |||
| 29043896c8 | |||
| 1af9053cd5 | |||
| 2dcd03cbb0 | |||
| b5f0cdb429 | |||
| b63abca296 | |||
| 1311aadffb | |||
| f903ddeee5 | |||
| 33f3ddd7e9 | |||
| 1cdedf824c | |||
| beb856714e | |||
| fcb2067059 | |||
| cebd236b1f | |||
| 8cc818f6b2 | |||
| 305a7a5115 | |||
| 526888cd26 | |||
| 8d97d09b07 | |||
| 3f481e0a16 | |||
| 15dea7a249 | |||
| e1bace9044 | |||
| 09f2d2b013 | |||
| d195efdb0e | |||
| 3277c810a5 | |||
| f2baf3daf6 | |||
| 931470ee0a | |||
| 41b30788fe | |||
| 01ebff3596 | |||
| ed2c899915 | |||
| c548ead4f7 | |||
| 3b8cd7b742 | |||
| d71408b567 | |||
| a8147d9ae5 | |||
| 2b1950d4e3 | |||
| 322927e2b0 | |||
| 4cae9fe706 | |||
| b5b164b543 | |||
| 08db384f60 | |||
| 3b2cd0c3cf | |||
| 13a4467166 | |||
| 4c0b0fb780 | |||
| a09d1b49c2 | |||
| 8d381ef9f4 | |||
| 79d51c3f58 | |||
| 83fb796a9f | |||
| 4efc44e964 | |||
| 3970c60016 | |||
| a8b63e71c8 | |||
| 58c851004d | |||
| bd889902be | |||
| 7fd79c9911 | |||
| 41eacfec02 | |||
| 0a0748b920 | |||
| d6e0e09e87 | |||
| 61c3020a5e | |||
| 972b973f58 | |||
| 8c5a7b78c6 | |||
| 675204816a | |||
| 3bb82dbc6b | |||
| 0f6233c3ec | |||
| 43fa56bf35 | |||
| 50c930eeaf | |||
| 8dde15b8ef | |||
| 6100d8dc69 | |||
| a92f0fcb28 | |||
| bd4604cdcc | |||
| 31db372b43 | |||
| 360e776745 | |||
| 5a819f70bb | |||
| b2c055ffb2 | |||
| 6e0b34843b | |||
| e8485e3bb7 | |||
| e8cd970960 | |||
| 78b59cec4f | |||
| e6d40a9f7e | |||
| 7733a1be46 | |||
| a5df98bc5a | |||
| fb9b0dd2f5 | |||
| 0dc214069c | |||
| a6c4be9530 | |||
| 6e338e6d65 | |||
| 41f16fa0b8 | |||
| 1b05728817 | |||
| 520a417316 | |||
| 88ed5360ca | |||
| 392d40def3 | |||
| 5ef4d832fb | |||
| 49afc0c084 | |||
| b2c82ceaa8 | |||
| b9286d7243 | |||
| 22931e6747 | |||
| ac030018c6 | |||
| 7386d3a5ee | |||
| 2a5a9f2ee9 | |||
| 963a7c10fa | |||
| 283cf9d614 | |||
| 5b3b4ea2ed | |||
| 5a9d5de5c4 | |||
| a5e3f613c2 | |||
| 8b8fac2d89 | |||
| 31d79ba75b | |||
| 6faf148fde | |||
| e88f1c93c5 | |||
| 51375db1e4 | |||
| 9415a8ece2 | |||
| da85ee776d | |||
| e23dc7df5b | |||
| 163b9e4c22 | |||
| d521c3b013 | |||
| d123400ea9 | |||
| 9c64a8ec00 | |||
| 4907238726 | |||
| 37aad7d951 | |||
| ac34f029ed | |||
| 8d04add7dc | |||
| d7a07cebf5 | |||
| 2ba961bfa8 | |||
| 765e92f9c7 | |||
| 1bb202d017 | |||
| 98769f59d6 | |||
| 762037d17f | |||
| 32a22c783d | |||
| 8c29c18287 | |||
| 092a8b3658 | |||
| c7ff79d0c3 | |||
| ac51f50ef5 | |||
| c5347b6eba | |||
| d4525313bb | |||
| 92a27ac92b | |||
| fabfeea1c2 | |||
| 5ce0e0e1df | |||
| bd473d1ad2 | |||
| 064d227344 | |||
| dd8fee0ecb | |||
| a2b54be875 | |||
| ccf6154ba0 | |||
| bd5988dfbc | |||
| a57fc9107b | |||
| a7dce7cfb9 | |||
| b608e110c9 | |||
| 78dee346e9 | |||
| 66f26842c9 | |||
| 9c504e0278 | |||
| 4035d38ab2 | |||
| 53ef2f6293 | |||
| e5cd9bd98e | |||
| 0b51b44856 |
96
.gitea/workflows/deploy-nomad.yaml
Normal file
@@ -0,0 +1,96 @@
# ABOUTME: Reusable workflow for building Nix Docker images and deploying to Nomad.
# ABOUTME: Called by service repos with: uses: alo/alo-cluster/.gitea/workflows/deploy-nomad.yaml@master

name: Deploy to Nomad

on:
  workflow_call:
    inputs:
      service_name:
        required: true
        type: string
        description: "Nomad job name (must match job ID in services/*.hcl)"
      flake_output:
        required: false
        type: string
        default: "dockerImage"
        description: "Flake output to build (default: dockerImage)"
      registry:
        required: false
        type: string
        default: "gitea.v.paler.net"
        description: "Container registry hostname"
    secrets:
      REGISTRY_USERNAME:
        required: true
      REGISTRY_PASSWORD:
        required: true
      NOMAD_ADDR:
        required: true

jobs:
  build-and-deploy:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4

      - name: Build Docker image
        run: |
          echo "Building .#${{ inputs.flake_output }}..."
          nix build ".#${{ inputs.flake_output }}" --out-link result

      - name: Push to registry
        run: |
          echo "Pushing to ${{ inputs.registry }}/alo/${{ inputs.service_name }}:latest..."
          skopeo copy \
            --dest-creds "${{ secrets.REGISTRY_USERNAME }}:${{ secrets.REGISTRY_PASSWORD }}" \
            --insecure-policy \
            docker-archive:result \
            "docker://${{ inputs.registry }}/alo/${{ inputs.service_name }}:latest"

      - name: Deploy to Nomad
        env:
          NOMAD_ADDR: ${{ secrets.NOMAD_ADDR }}
          SERVICE: ${{ inputs.service_name }}
        run: |
          echo "Deploying $SERVICE to Nomad..."

          # Fetch current job, update UUID to force deployment
          JOB=$(curl -sS "$NOMAD_ADDR/v1/job/$SERVICE")
          NEW_UUID=$(cat /proc/sys/kernel/random/uuid)
          echo "New deployment UUID: $NEW_UUID"
          UPDATED_JOB=$(echo "$JOB" | jq --arg uuid "$NEW_UUID" '.Meta.uuid = $uuid')

          # Submit updated job
          RESULT=$(echo "{\"Job\": $UPDATED_JOB}" | curl -sS -X POST "$NOMAD_ADDR/v1/jobs" \
            -H "Content-Type: application/json" -d @-)
          echo "Submit result: $RESULT"

          # Monitor deployment
          sleep 3
          DEPLOY_ID=$(curl -sS "$NOMAD_ADDR/v1/job/$SERVICE/deployments" | jq -r '.[0].ID')
          echo "Deployment ID: $DEPLOY_ID"

          if [ "$DEPLOY_ID" = "null" ]; then
            echo "ERROR: No deployment created. Ensure job has 'update' stanza with 'auto_revert = true'"
            exit 1
          fi

          echo "Monitoring deployment..."
          for i in $(seq 1 30); do
            STATUS=$(curl -sS "$NOMAD_ADDR/v1/deployment/$DEPLOY_ID" | jq -r '.Status')
            echo "[$i/30] Deployment status: $STATUS"
            case $STATUS in
              successful)
                echo "Deployment successful!"
                exit 0
                ;;
              failed|cancelled)
                echo "Deployment failed or cancelled"
                exit 1
                ;;
            esac
            sleep 10
          done
          echo "Timeout waiting for deployment"
          exit 1
|
||||
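For reference, a caller from a service repo might look like the sketch below. Only the `uses:` path comes from the ABOUTME comment above; the repo layout, job name, and secret wiring are assumptions. Note that the target Nomad job must carry an `update` stanza with `auto_revert = true`, or the deploy step above exits with the "No deployment created" error.

```yaml
# Hypothetical .gitea/workflows/deploy.yaml in a service repo (a sketch, not part of this change)
name: Deploy
on:
  push:
    branches: [master]
jobs:
  deploy:
    uses: alo/alo-cluster/.gitea/workflows/deploy-nomad.yaml@master
    with:
      service_name: my-service   # must match the job ID in services/my-service.hcl
    secrets:
      REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
      REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
      NOMAD_ADDR: ${{ secrets.NOMAD_ADDR }}
```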
2
.gitignore
vendored
@@ -2,3 +2,5 @@
|
||||
.tmp
|
||||
result
|
||||
.aider*
|
||||
.claude
|
||||
.direnv/
|
||||
|
||||
23
.sops.yaml
@@ -2,10 +2,12 @@ keys:
|
||||
- &admin_ppetru age1df9ukkmg9yn9cjeheq9m6wspa420su8qarmq570rdvf2de3rl38saqauwn
|
||||
- &server_zippy age1gtyw202hd07hddac9886as2cs8pm07e4exlnrgfm72lync75ng9qc5fjac
|
||||
- &server_chilly age16yqffw4yl5jqvsr7tyd883vn98zw0attuv9g5snc329juff6dy3qw2w5wp
|
||||
- &server_sparky age1zxf8263nk04zf4pu5x2czh6g4trv4e2xydypyjschyekr6udqcsqmrgv68
|
||||
- &server_sparky age14aml5s3sxksa8qthnt6apl3pu6egxyn0cz7pdzzvp2yl6wncad0q56udyj
|
||||
- &server_stinky age1me78u46409q9ez6fj0qanrfffc5e9kuq7n7uuvlljfwwc2mdaezqmyzxhx
|
||||
- &server_beefy age1cs8uqj243lspyp042ueu5aes4t3azgyuaxl9au70ggrl2meulq4sgqpc7y
|
||||
- &server_alo_cloud_1 age1w5w4wfvtul3sge9mt205zvrkjaeh3qs9gsxhmq7df2g4dztnvv6qylup8z
|
||||
- &server_c1 age1wwufz86tm3auxn6pn27c47s8rvu7en58rk00nghtaxsdpw0gya6qj6qxdt
|
||||
- &server_c2 age1c2kc034n7tqztarcu7n5ldnjmy9sr3jgwrsaddsj0hwfus9mdp3sctts4m
|
||||
- &server_c2 age1jy7pe4530s8w904wtvrmpxvteztqy5ewdt92a7y3lq87sg9jce5qxxuydt
|
||||
- &server_c3 age1zjgqu3zks5kvlw6hvy6ytyygq7n25lu0uj2435zlf30smpxuy4hshpmfer
|
||||
creation_rules:
|
||||
- path_regex: secrets/common\.yaml
|
||||
@@ -15,6 +17,8 @@ creation_rules:
|
||||
- *server_zippy
|
||||
- *server_chilly
|
||||
- *server_sparky
|
||||
- *server_stinky
|
||||
- *server_beefy
|
||||
- *server_alo_cloud_1
|
||||
- *server_c1
|
||||
- *server_c2
|
||||
@@ -34,6 +38,21 @@ creation_rules:
|
||||
- age:
|
||||
- *admin_ppetru
|
||||
- *server_sparky
|
||||
- path_regex: secrets/stinky\.yaml
|
||||
key_groups:
|
||||
- age:
|
||||
- *admin_ppetru
|
||||
- *server_stinky
|
||||
- path_regex: secrets/beefy\.yaml
|
||||
key_groups:
|
||||
- age:
|
||||
- *admin_ppetru
|
||||
- *server_beefy
|
||||
- path_regex: secrets/wifi\.yaml
|
||||
key_groups:
|
||||
- age:
|
||||
- *admin_ppetru
|
||||
- *server_stinky
|
||||
- path_regex: secrets/alo-cloud-1\.yaml
|
||||
key_groups:
|
||||
- age:
|
||||
|
||||
100
CLAUDE.md
@@ -6,47 +6,67 @@ NixOS cluster configuration using flakes. Homelab infrastructure with Nomad/Cons
|
||||
|
||||
```
|
||||
├── common/
|
||||
│ ├── global/ # Applied to all hosts (backup, sops, users, etc.)
|
||||
│ ├── compute-node.nix # Nomad client + Consul agent + NFS client
|
||||
│ ├── cluster-node.nix # Nomad server + Consul server (for quorum members)
|
||||
│ ├── nfs-services-server.nix # NFS server + btrfs replication (zippy)
|
||||
│ └── nfs-services-standby.nix # NFS standby + receive replication (c1, c2)
|
||||
├── hosts/
|
||||
│ ├── c1/, c2/, c3/ # Cattle nodes (compute, quorum members)
|
||||
│ ├── zippy/ # Primary storage + NFS server + stateful workloads
|
||||
│ ├── fractal/ # (Proxmox, will become NixOS storage node)
|
||||
│ ├── sunny/ # (Standalone ethereum node, not in cluster)
|
||||
│ └── chilly/ # (Home Assistant VM, not in cluster)
|
||||
│ ├── global/ # Applied to all hosts (backup, sops, users, etc.)
|
||||
│ ├── minimal-node.nix # Base (ssh, user, boot, impermanence)
|
||||
│ ├── cluster-member.nix # Consul agent + storage mounts (NFS/CIFS)
|
||||
│ ├── nomad-worker.nix # Nomad client (runs jobs) + Docker + NFS deps
|
||||
│ ├── nomad-server.nix # Enables Consul + Nomad server mode
|
||||
│ ├── cluster-tools.nix # Just CLI tools (nomad, wander, damon)
|
||||
│ ├── workstation-node.nix # Dev tools (wget, deploy-rs, docker, nix-ld)
|
||||
│ ├── desktop-node.nix # Hyprland + GUI environment
|
||||
│ ├── nfs-services-server.nix # NFS server + btrfs replication
|
||||
│ └── nfs-services-standby.nix # NFS standby + receive replication
|
||||
├── hosts/ # Host configs - check imports for roles
|
||||
├── docs/
|
||||
│ ├── CLUSTER_REVAMP.md # Master plan for architecture changes
|
||||
│ ├── MIGRATION_TODO.md # Tracking checklist for migration
|
||||
│ └── NFS_FAILOVER.md # NFS failover procedures
|
||||
│ ├── NFS_FAILOVER.md # NFS failover procedures
|
||||
│ └── AUTH_SETUP.md # Authentication (Pocket ID + Traefik OIDC)
|
||||
└── services/ # Nomad job specs (.hcl files)
|
||||
```
|
||||
|
||||
## Current Architecture (transitioning)
|
||||
|
||||
**OLD**: GlusterFS on c1/c2/c3 at `/data/compute` (being phased out)
|
||||
**NEW**: NFS from zippy at `/data/services` (current target)
|
||||
## Current Architecture
|
||||
|
||||
### Storage Mounts
|
||||
- `/data/services` - NFS from `data-services.service.consul` (zippy primary, c1 standby)
|
||||
- `/data/media` - CIFS from fractal (existing, unchanged)
|
||||
- `/data/shared` - CIFS from fractal (existing, unchanged)
|
||||
- `/data/services` - NFS from `data-services.service.consul` (check nfs-services-server.nix for primary)
|
||||
- `/data/media` - CIFS from fractal
|
||||
- `/data/shared` - CIFS from fractal
|
||||
|
||||
### Hosts
|
||||
- **c1, c2, c3**: Cattle nodes, run most workloads, Nomad/Consul quorum
|
||||
- **zippy**: Primary NFS server, runs databases (affinity), replicates to c1 every 5min
|
||||
- **fractal**: Storage node (Proxmox/ZFS), will join quorum after GlusterFS removed
|
||||
- **sunny**: Standalone ethereum staking node
|
||||
- **chilly**: Home Assistant VM
|
||||
### Cluster Roles (check hosts/*/default.nix for each host's imports)
|
||||
- **Quorum**: hosts importing `nomad-server.nix` (3 expected for consensus)
|
||||
- **Workers**: hosts importing `nomad-worker.nix` (run Nomad jobs)
|
||||
- **NFS server**: host importing `nfs-services-server.nix` (affinity for direct disk access like DBs)
|
||||
- **Standby**: hosts importing `nfs-services-standby.nix` (receive replication)
|
||||
|
||||
## Config Architecture
|
||||
|
||||
**Modular role-based configs** (compose as needed):
|
||||
- `minimal-node.nix` - Base for all systems (SSH, user, boot, impermanence)
|
||||
- `cluster-member.nix` - Consul agent + shared storage mounts (no Nomad)
|
||||
- `nomad-worker.nix` - Nomad client to run jobs (requires cluster-member)
|
||||
- `nomad-server.nix` - Enables Consul + Nomad server mode (for quorum members)
|
||||
- `cluster-tools.nix` - Just CLI tools (no services)
|
||||
|
||||
**Machine type configs** (via flake profile):
|
||||
- `workstation-node.nix` - Dev tools (deploy-rs, docker, nix-ld, emulation)
|
||||
- `desktop-node.nix` - Extends workstation + Hyprland/GUI
|
||||
|
||||
**Composition patterns** (see the sketch after this list):
|
||||
- Quorum member: `cluster-member + nomad-worker + nomad-server`
|
||||
- Worker only: `cluster-member + nomad-worker`
|
||||
- CLI only: `cluster-member + cluster-tools` (Consul agent, no Nomad service)
|
||||
- NFS primary: `cluster-member + nomad-worker + nfs-services-server`
|
||||
- Standalone: `minimal-node` only (no cluster membership)
|
||||
|
||||
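A minimal sketch of a quorum-member host under these rules, assuming the usual `hosts/<name>/default.nix` layout (the hostname and hardware module are placeholders; the role modules are the ones added in this change):

```nix
# hosts/<some-host>/default.nix (illustrative only)
{ ... }:
{
  imports = [
    ./hardware-configuration.nix
    ../../common/cluster-member.nix
    ../../common/nomad-worker.nix
    ../../common/nomad-server.nix # drop this line for a worker-only host
  ];
}
```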
**Key insight**: Profiles (workstation/desktop) don't imply cluster roles. Check imports for actual roles.
|
||||
|
||||
## Key Patterns
|
||||
|
||||
**NFS Server/Standby**:
|
||||
- Primary (zippy): imports `nfs-services-server.nix`, sets `standbys = ["c1"]`
|
||||
- Standby (c1): imports `nfs-services-standby.nix`, sets `replicationKeys = [...]`
|
||||
- Primary: imports `nfs-services-server.nix`, sets `standbys = [...]`
|
||||
- Standby: imports `nfs-services-standby.nix`, sets `replicationKeys = [...]`
|
||||
- Replication: btrfs send/receive every 5min, incremental with fallback to full
|
||||
- Check host configs for current primary/standby assignments
|
||||
|
||||
**Backups**:
|
||||
- Kopia client on all nodes → Kopia server on fractal
|
||||
@@ -57,28 +77,26 @@ NixOS cluster configuration using flakes. Homelab infrastructure with Nomad/Cons
|
||||
- SOPS for secrets, files in `secrets/`
|
||||
- Keys managed per-host
|
||||
|
||||
**Authentication**:
|
||||
- Pocket ID (OIDC provider) at `pocket-id.v.paler.net`
|
||||
- Traefik uses `traefik-oidc-auth` plugin for SSO
|
||||
- Services add `middlewares=oidc-auth@file` tag to protect
|
||||
- See `docs/AUTH_SETUP.md` for details
|
||||
|
||||
## Migration Status
|
||||
|
||||
**Phase**: 4 in progress (20/35 services migrated)
|
||||
**Current**: Migrating services from GlusterFS → NFS
|
||||
**Next**: Finish migrating remaining services, update host volumes, remove GlusterFS
|
||||
**Later**: Convert fractal to NixOS (deferred)
|
||||
**Phase 3 & 4**: COMPLETE! GlusterFS removed, all services on NFS
|
||||
**Next**: Convert fractal to NixOS (deferred)
|
||||
|
||||
See `docs/MIGRATION_TODO.md` for detailed checklist.
|
||||
|
||||
**IMPORTANT**: When working on migration tasks:
|
||||
1. Always update `docs/MIGRATION_TODO.md` after completing each service migration
|
||||
2. Update both the individual service checklist AND the summary counts at the bottom
|
||||
3. Pattern: `/data/compute/appdata/foo` → `/data/services/foo` (NOT `/data/services/appdata/foo`!)
|
||||
4. Migration workflow per service: stop → copy data → edit config → start → update MIGRATION_TODO.md
|
||||
|
||||
## Common Tasks
|
||||
|
||||
**Deploy a host**: `deploy -s '.#hostname'`
|
||||
**Deploy all**: `deploy`
|
||||
**Check replication**: `ssh zippy journalctl -u replicate-services-to-c1.service -f`
|
||||
**Check replication**: Check NFS primary host, then `ssh <primary> journalctl -u replicate-services-to-*.service -f`
|
||||
**NFS failover**: See `docs/NFS_FAILOVER.md`
|
||||
**Nomad jobs**: `services/*.hcl` - update paths: `/data/compute/appdata/foo` → `/data/services/foo` (NOT `/data/services/appdata/foo`!)
|
||||
**Nomad jobs**: `services/*.hcl` - service data stored at `/data/services/<service-name>`
|
||||
|
||||
## Troubleshooting Hints
|
||||
|
||||
@@ -90,8 +108,8 @@ See `docs/MIGRATION_TODO.md` for detailed checklist.
|
||||
## Important Files
|
||||
|
||||
- `common/global/backup.nix` - Kopia backup configuration
|
||||
- `hosts/zippy/default.nix` - NFS server config, replication targets
|
||||
- `hosts/c1/default.nix` - NFS standby config, authorized replication keys
|
||||
- `common/nfs-services-server.nix` - NFS server role (check hosts for which imports this)
|
||||
- `common/nfs-services-standby.nix` - NFS standby role (check hosts for which imports this)
|
||||
- `flake.nix` - Host definitions, nixpkgs inputs
|
||||
|
||||
---
|
||||
|
||||
@@ -22,7 +22,6 @@ Each layer extends the previous one, inheriting all configurations. Hosts select
|
||||
|
||||
### Special Node Types
|
||||
|
||||
- **cloud-node**: Minimal + Consul only (cloud VPS deployments)
|
||||
- **compute-node**: Cluster + Nomad worker (container orchestration)
|
||||
|
||||
## Directory Structure
|
||||
@@ -40,9 +39,7 @@ Each layer extends the previous one, inheriting all configurations. Hosts select
|
||||
│ ├── server-node.nix # Server layer: bare metal services (future)
|
||||
│ ├── workstation-node.nix # Workstation layer: dev tools
|
||||
│ ├── desktop-node.nix # Desktop layer: GUI environment
|
||||
│ ├── cloud-node.nix # Cloud VPS profile
|
||||
│ ├── compute-node.nix # Nomad worker profile
|
||||
│ ├── base-node.nix # [DEPRECATED] Alias for cluster-node
|
||||
│ └── [feature modules] # Individual feature configs
|
||||
├── hosts/
|
||||
│ ├── c1/ # Compute node 1
|
||||
@@ -102,7 +99,7 @@ This ensures system and user configurations stay synchronized.
|
||||
| Host | Profile | Role | Hardware |
|
||||
|------|---------|------|----------|
|
||||
| **c1, c2, c3** | compute-node | Nomad workers | Bare metal servers |
|
||||
| **alo-cloud-1** | cloud-node | Reverse proxy | Cloud VPS |
|
||||
| **alo-cloud-1** | minimal | Reverse proxy (Traefik) | Cloud VPS |
|
||||
| **chilly** | server | Home Assistant in a VM | Bare metal server |
|
||||
| **zippy** | workstation | Development machine, server | Bare metal server |
|
||||
| **sparky** | desktop | Desktop environment | Bare metal desktop |
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# DEPRECATED: Use cluster-node.nix for cluster nodes or minimal-node.nix for minimal systems
|
||||
# This file is kept for backward compatibility with existing configurations
|
||||
imports = [
|
||||
./cluster-node.nix
|
||||
];
|
||||
}
|
||||
@@ -19,6 +19,8 @@
|
||||
enable = true;
|
||||
cache = {
|
||||
hostName = config.networking.hostName;
|
||||
# NOTE: These paths are hardcoded to /persist (not using config.custom.impermanence.persistPath)
|
||||
# This is acceptable since this service is only enabled on btrfs-based hosts
|
||||
dataPath = "/persist/ncps/data";
|
||||
tempPath = "/persist/ncps/tmp";
|
||||
databaseURL = "sqlite:/persist/ncps/db/db.sqlite";
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
# this line prevents hanging on network split
|
||||
automount_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.mount-timeout=5s";
|
||||
automount_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.mount-timeout=5s,nobrl";
|
||||
in
|
||||
{
|
||||
environment.systemPackages = [ pkgs.cifs-utils ];
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# Cloud node: Minimal system with Consul for cloud deployments
|
||||
imports = [
|
||||
./minimal-node.nix
|
||||
./consul.nix
|
||||
];
|
||||
}
|
||||
24
common/cluster-member.nix
Normal file
@@ -0,0 +1,24 @@
{ pkgs, lib, config, ... }:
{
  # Cluster node configuration
  # Extends minimal-node with cluster-specific services (Consul, GlusterFS, CIFS, NFS)
  # Used by: compute nodes (c1, c2, c3)
  imports = [
    ./minimal-node.nix
    ./unattended-encryption.nix
    ./cifs-client.nix
    ./consul.nix
    ./nfs-services-client.nix # New: NFS client for /data/services
  ];

  options.networking.cluster.primaryInterface = lib.mkOption {
    type = lib.types.str;
    default = "eno1";
    description = "Primary network interface for cluster communication (Consul, NFS, etc.)";
  };

  config = {
    # Wait for primary interface to be routable before considering network online
    systemd.network.wait-online.extraArgs = [ "--interface=${config.networking.cluster.primaryInterface}:routable" ];
  };
}
|
||||
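Keeping the interface name behind an option lets other modules reuse it (the consul.nix hunk further down advertises on `config.networking.cluster.primaryInterface`). A per-host override is only needed when the NIC differs from the `eno1` default, for example (interface name is a placeholder):

```nix
{
  networking.cluster.primaryInterface = "enp3s0";
}
```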
@@ -1,14 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# Cluster node configuration
|
||||
# Extends minimal-node with cluster-specific services (Consul, GlusterFS, CIFS, NFS)
|
||||
# Used by: compute nodes (c1, c2, c3)
|
||||
imports = [
|
||||
./minimal-node.nix
|
||||
./unattended-encryption.nix
|
||||
./cifs-client.nix
|
||||
./consul.nix
|
||||
./glusterfs-client.nix # Keep during migration, will be removed in Phase 3
|
||||
./nfs-services-client.nix # New: NFS client for /data/services
|
||||
];
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# Compute node: Cluster node with Nomad and GlusterFS server
|
||||
# Used by: c1, c2, c3
|
||||
imports = [
|
||||
./cluster-node.nix
|
||||
./glusterfs.nix
|
||||
./nomad.nix
|
||||
];
|
||||
}
|
||||
@@ -1,44 +1,47 @@
|
||||
{ pkgs, config, ... }:
|
||||
{ pkgs, config, lib, ... }:
|
||||
let
|
||||
servers = [
|
||||
"c1"
|
||||
"c2"
|
||||
"c3"
|
||||
];
|
||||
server_enabled = builtins.elem config.networking.hostName servers;
|
||||
in
|
||||
{
|
||||
services.consul = {
|
||||
enable = true;
|
||||
webUi = true;
|
||||
interface.advertise = "eno1";
|
||||
extraConfig = {
|
||||
client_addr = "0.0.0.0";
|
||||
datacenter = "alo";
|
||||
server = server_enabled;
|
||||
bootstrap_expect = if server_enabled then (builtins.length servers + 2) / 2 else null;
|
||||
retry_join = builtins.filter (elem: elem != config.networking.hostName) servers;
|
||||
options.clusterRole.consulServer = lib.mkEnableOption "Consul server mode";
|
||||
|
||||
config = {
|
||||
services.consul = {
|
||||
enable = true;
|
||||
webUi = true;
|
||||
interface.advertise = config.networking.cluster.primaryInterface;
|
||||
extraConfig = {
|
||||
client_addr = "0.0.0.0";
|
||||
datacenter = "alo";
|
||||
server = config.clusterRole.consulServer;
|
||||
bootstrap_expect = if config.clusterRole.consulServer then (builtins.length servers + 2) / 2 else null;
|
||||
retry_join = builtins.filter (elem: elem != config.networking.hostName) servers;
|
||||
telemetry = {
|
||||
prometheus_retention_time = "24h";
|
||||
disable_hostname = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
environment.persistence."/persist".directories = [ "/var/lib/consul" ];
|
||||
environment.persistence.${config.custom.impermanence.persistPath}.directories = [ "/var/lib/consul" ];
|
||||
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [
|
||||
8600
|
||||
8500
|
||||
8301
|
||||
8302
|
||||
8300
|
||||
];
|
||||
allowedUDPPorts = [
|
||||
8600
|
||||
8301
|
||||
8302
|
||||
];
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [
|
||||
8600
|
||||
8500
|
||||
8301
|
||||
8302
|
||||
8300
|
||||
];
|
||||
allowedUDPPorts = [
|
||||
8600
|
||||
8301
|
||||
8302
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# Desktop profile: Graphical desktop with Hyprland
|
||||
# Extends workstation-node with desktop environment
|
||||
imports = [
|
||||
./workstation-node.nix
|
||||
];
|
||||
|
||||
# Enable Hyprland (Wayland compositor)
|
||||
programs.hyprland = {
|
||||
enable = true;
|
||||
xwayland.enable = true; # For compatibility with X11 apps if needed
|
||||
};
|
||||
|
||||
# Essential desktop services
|
||||
services.dbus.enable = true;
|
||||
|
||||
# polkit for privilege escalation
|
||||
security.polkit.enable = true;
|
||||
|
||||
# Enable sound with pipewire
|
||||
security.rtkit.enable = true;
|
||||
services.pipewire = {
|
||||
enable = true;
|
||||
alsa.enable = true;
|
||||
alsa.support32Bit = true;
|
||||
pulse.enable = true;
|
||||
};
|
||||
|
||||
# XDG portal for screen sharing, file pickers, etc.
|
||||
xdg.portal = {
|
||||
enable = true;
|
||||
extraPortals = [ pkgs.xdg-desktop-portal-hyprland ];
|
||||
};
|
||||
|
||||
# Fonts
|
||||
fonts.packages = with pkgs; [
|
||||
noto-fonts
|
||||
noto-fonts-cjk-sans
|
||||
noto-fonts-emoji
|
||||
liberation_ttf
|
||||
fira-code
|
||||
fira-code-symbols
|
||||
];
|
||||
|
||||
# Enable greetd with tuigreet for login
|
||||
services.greetd = {
|
||||
enable = true;
|
||||
settings = {
|
||||
default_session = {
|
||||
command = "${pkgs.greetd.tuigreet}/bin/tuigreet --time --remember --remember-session --sessions ${pkgs.writeText "sessions" ''
|
||||
Hyprland:Hyprland
|
||||
Console:fish
|
||||
''}";
|
||||
user = "greeter";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Environment variables for Wayland
|
||||
environment.sessionVariables = {
|
||||
NIXOS_OZONE_WL = "1"; # Hint electron apps to use Wayland
|
||||
WLR_NO_HARDWARE_CURSORS = "1"; # Fix cursor rendering on some hardware
|
||||
};
|
||||
}
|
||||
Binary file not shown.
After Width: | Height: | Size: 5.7 MiB |
79
common/desktop/default.nix
Normal file
@@ -0,0 +1,79 @@
|
||||
# ABOUTME: NixOS desktop environment module for Hyprland
|
||||
# ABOUTME: Configures greetd, audio, bluetooth, fonts, and system services
|
||||
|
||||
{ config, pkgs, lib, ... }:
|
||||
{
|
||||
imports = [
|
||||
../workstation-node.nix
|
||||
];
|
||||
|
||||
# Force NetworkManager off - we use useDHCP globally
|
||||
networking.networkmanager.enable = lib.mkForce false;
|
||||
|
||||
# Hyprland window manager
|
||||
programs.hyprland = {
|
||||
enable = true;
|
||||
xwayland.enable = true;
|
||||
};
|
||||
|
||||
# greetd display manager with tuigreet
|
||||
services.greetd = {
|
||||
enable = true;
|
||||
settings = {
|
||||
default_session = {
|
||||
command = "${pkgs.tuigreet}/bin/tuigreet --time --cmd Hyprland";
|
||||
user = "greeter";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Essential desktop services
|
||||
services.dbus.enable = true;
|
||||
|
||||
# polkit for privilege escalation
|
||||
security.polkit.enable = true;
|
||||
|
||||
# DNS resolution
|
||||
services.resolved.enable = true;
|
||||
|
||||
# Bluetooth support
|
||||
hardware.bluetooth = {
|
||||
enable = true;
|
||||
powerOnBoot = true;
|
||||
};
|
||||
services.blueman.enable = true;
|
||||
|
||||
# Audio with PipeWire
|
||||
security.rtkit.enable = true;
|
||||
services.pipewire = {
|
||||
enable = true;
|
||||
alsa.enable = true;
|
||||
alsa.support32Bit = true;
|
||||
pulse.enable = true;
|
||||
jack.enable = true;
|
||||
};
|
||||
|
||||
# direnv support
|
||||
programs.direnv.enable = true;
|
||||
|
||||
# Fonts
|
||||
fonts.packages = with pkgs; [
|
||||
noto-fonts
|
||||
noto-fonts-cjk-sans
|
||||
noto-fonts-color-emoji
|
||||
liberation_ttf
|
||||
fira-code
|
||||
fira-code-symbols
|
||||
nerd-fonts.caskaydia-mono
|
||||
];
|
||||
|
||||
# Environment variables for Wayland
|
||||
environment.sessionVariables = {
|
||||
NIXOS_OZONE_WL = "1";
|
||||
};
|
||||
|
||||
# Additional desktop packages
|
||||
environment.systemPackages = with pkgs; [
|
||||
prusa-slicer
|
||||
];
|
||||
}
|
||||
@@ -12,7 +12,7 @@
|
||||
checkpoint-sync-url = "https://beaconstate.info";
|
||||
};
|
||||
};
|
||||
environment.persistence."/persist".directories = [
|
||||
environment.persistence.${config.custom.impermanence.persistPath}.directories = [
|
||||
"/var/lib/private/lighthouse-mainnet"
|
||||
];
|
||||
}
|
||||
|
||||
@@ -6,8 +6,7 @@ let
|
||||
btrfs = "${btrfsPkg}/bin/btrfs";
|
||||
snapshotBackup = pkgs.writeScript "kopia-snapshot-backup" (builtins.readFile ./kopia-snapshot-backup.sh);
|
||||
backupScript = pkgs.writeShellScript "backup-persist" ''
|
||||
target_path="/persist"
|
||||
snapshot_path="$target_path/kopia-backup-snapshot"
|
||||
target_path="${config.custom.impermanence.persistPath}"
|
||||
KOPIA_CHECK_FOR_UPDATES=false
|
||||
|
||||
${kopia} repository connect server \
|
||||
@@ -16,18 +15,29 @@ let
|
||||
-p "$(cat ${config.sops.secrets.kopia.path})" \
|
||||
|| exit 1
|
||||
|
||||
[ -e "$snapshot_path" ] && ${btrfs} subvolume delete "$snapshot_path"
|
||||
# Check if target_path is on btrfs filesystem
|
||||
fs_type=$(stat -f -c %T "$target_path")
|
||||
|
||||
${btrfs} subvolume snapshot -r "$target_path" "$snapshot_path"
|
||||
if [ "$fs_type" = "btrfs" ]; then
|
||||
# On btrfs: use snapshot for consistency
|
||||
snapshot_path="$target_path/kopia-backup-snapshot"
|
||||
[ -e "$snapshot_path" ] && ${btrfs} subvolume delete "$snapshot_path"
|
||||
${btrfs} subvolume snapshot -r "$target_path" "$snapshot_path"
|
||||
|
||||
# --no-send-snapshot-path due to https://github.com/kopia/kopia/issues/4402
|
||||
# Exclude btrfs replication snapshots (they appear as empty dirs in the snapshot anyway)
|
||||
${kopia} snapshot create --no-send-snapshot-report --override-source "$target_path" \
|
||||
--ignore "services@*" \
|
||||
--ignore "services-standby/services@*" \
|
||||
-- "$snapshot_path"
|
||||
# --no-send-snapshot-path due to https://github.com/kopia/kopia/issues/4402
|
||||
# Exclude btrfs replication snapshots (they appear as empty dirs in the snapshot anyway)
|
||||
${kopia} snapshot create --no-send-snapshot-report --override-source "$target_path" \
|
||||
--ignore "services@*" \
|
||||
--ignore "services-standby/services@*" \
|
||||
-- "$snapshot_path"
|
||||
|
||||
${btrfs} subvolume delete "$snapshot_path"
|
||||
else
|
||||
# On non-btrfs (e.g., ext4): backup directly without snapshot
|
||||
${kopia} snapshot create --no-send-snapshot-report --override-source "$target_path" \
|
||||
-- "$target_path"
|
||||
fi
|
||||
|
||||
${btrfs} subvolume delete "$snapshot_path"
|
||||
${kopia} repository disconnect
|
||||
'';
|
||||
in
|
||||
@@ -41,7 +51,7 @@ in
|
||||
services."backup-persist" = {
|
||||
description = "Backup persistent data with Kopia";
|
||||
serviceConfig = {
|
||||
type = "oneshot";
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
ExecStart = "${backupScript}";
|
||||
};
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
./console.nix
|
||||
./cpufreq.nix
|
||||
./flakes.nix
|
||||
./impermanence-options.nix
|
||||
./kernel.nix
|
||||
./locale.nix
|
||||
./network.nix
|
||||
|
||||
14
common/global/impermanence-options.nix
Normal file
@@ -0,0 +1,14 @@
{
  lib,
  ...
}:
{
  # Define impermanence options that need to be available to all modules
  # The actual impermanence implementation is in common/impermanence.nix or common/impermanence-tmpfs.nix

  options.custom.impermanence.persistPath = lib.mkOption {
    type = lib.types.str;
    default = "/persist";
    description = "Path where persistent data is stored (e.g., /persist for btrfs, /nix/persist for tmpfs)";
  };
}
@@ -1,3 +1,4 @@
|
||||
{ lib, config, ... }:
|
||||
{
|
||||
networking = {
|
||||
useDHCP = true;
|
||||
@@ -9,7 +10,7 @@
|
||||
'';
|
||||
};
|
||||
|
||||
environment.persistence."/persist" = {
|
||||
environment.persistence.${config.custom.impermanence.persistPath} = {
|
||||
directories = [ "/var/db/dhcpcd" ];
|
||||
};
|
||||
}
|
||||
|
||||
@@ -3,8 +3,7 @@
|
||||
sops = {
|
||||
# sometimes the impermanence bind mount is stopped when sops needs these
|
||||
age.sshKeyPaths = [
|
||||
"/persist/etc/ssh/ssh_host_ed25519_key"
|
||||
"/persist/etc/ssh/ssh_host_rsa_key"
|
||||
"${config.custom.impermanence.persistPath}/etc/ssh/ssh_host_ed25519_key"
|
||||
];
|
||||
defaultSopsFile = ./../../secrets/common.yaml;
|
||||
secrets = {
|
||||
|
||||
@@ -22,6 +22,6 @@ in
|
||||
config = mkIf cfg.enable {
|
||||
services.tailscaleAutoconnect.enable = true;
|
||||
services.tailscale.package = pkgs.unstable.tailscale;
|
||||
environment.persistence."/persist".directories = [ "/var/lib/tailscale" ];
|
||||
environment.persistence.${config.custom.impermanence.persistPath}.directories = [ "/var/lib/tailscale" ];
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = [ pkgs.glusterfs ];
|
||||
|
||||
fileSystems."/data/compute" = {
|
||||
device = "192.168.1.71:/compute";
|
||||
fsType = "glusterfs";
|
||||
options = [
|
||||
"backup-volfile-servers=192.168.1.72:192.168.1.73"
|
||||
"_netdev"
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
services.glusterfs = {
|
||||
enable = true;
|
||||
};
|
||||
|
||||
environment.persistence."/persist".directories = [ "/var/lib/glusterd" ];
|
||||
|
||||
# TODO: each volume needs its own port starting at 49152
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
24007
|
||||
24008
|
||||
24009
|
||||
49152
|
||||
49153
|
||||
49154
|
||||
49155
|
||||
];
|
||||
}
|
||||
8
common/ham-radio.nix
Normal file
@@ -0,0 +1,8 @@
# ABOUTME: Ham radio tools profile for amateur radio operators.
# ABOUTME: Provides CLI tools for logging and processing ham radio contacts.
{ pkgs, ... }:
{
  environment.systemPackages = [
    pkgs.custom.flecli
  ];
}
30
common/impermanence-common.nix
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
# Common impermanence configuration shared by both btrfs and tmpfs variants
|
||||
# This module should be imported by impermanence.nix or impermanence-tmpfs.nix
|
||||
# The option custom.impermanence.persistPath is defined in common/global/impermanence-options.nix
|
||||
|
||||
environment.persistence.${config.custom.impermanence.persistPath} = {
|
||||
directories = [
|
||||
"/var/lib/nixos"
|
||||
"/home"
|
||||
];
|
||||
files = [
|
||||
"/etc/machine-id"
|
||||
"/etc/ssh/ssh_host_ed25519_key"
|
||||
"/etc/ssh/ssh_host_ed25519_key.pub"
|
||||
"/etc/ssh/ssh_host_rsa_key"
|
||||
"/etc/ssh/ssh_host_rsa_key.pub"
|
||||
];
|
||||
};
|
||||
|
||||
users.mutableUsers = false;
|
||||
|
||||
security.sudo.extraConfig = ''
|
||||
Defaults lecture = never
|
||||
'';
|
||||
}
|
||||
30
common/impermanence-tmpfs.nix
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
# Impermanence configuration for tmpfs root filesystem
|
||||
# Used for systems with tmpfs root (e.g., Raspberry Pi with SD card)
|
||||
# Root is in-memory and wiped on every boot
|
||||
# Persistent data is stored in /nix/persist (directory on the /nix partition)
|
||||
|
||||
# Import common impermanence configuration
|
||||
imports = [ ./impermanence-common.nix ];
|
||||
|
||||
config = {
|
||||
# Use /nix/persist for tmpfs-based impermanence
|
||||
custom.impermanence.persistPath = "/nix/persist";
|
||||
|
||||
# tmpfs root filesystem
|
||||
fileSystems."/" = {
|
||||
device = "none";
|
||||
fsType = "tmpfs";
|
||||
options = [
|
||||
"defaults"
|
||||
"size=2G"
|
||||
"mode=755"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,6 +1,5 @@
|
||||
{
|
||||
pkgs,
|
||||
inputs,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
@@ -9,28 +8,22 @@ let
|
||||
cfg = config.custom.impermanence;
|
||||
in
|
||||
{
|
||||
# Import common impermanence configuration
|
||||
imports = [ ./impermanence-common.nix ];
|
||||
|
||||
options.custom.impermanence = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = "Enable impermanent root fs";
|
||||
description = "Enable impermanent root fs with btrfs subvolume rollback";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
environment.persistence = {
|
||||
"/persist" = {
|
||||
directories = [ "/var/lib/nixos" ];
|
||||
files = [
|
||||
"/etc/machine-id"
|
||||
"/etc/ssh/ssh_host_ed25519_key"
|
||||
"/etc/ssh/ssh_host_ed25519_key.pub"
|
||||
"/etc/ssh/ssh_host_rsa_key"
|
||||
"/etc/ssh/ssh_host_rsa_key.pub"
|
||||
];
|
||||
};
|
||||
};
|
||||
# Use /persist for btrfs-based impermanence
|
||||
custom.impermanence.persistPath = "/persist";
|
||||
|
||||
# Btrfs-specific filesystem options
|
||||
fileSystems."/".options = [
|
||||
"compress=zstd"
|
||||
"noatime"
|
||||
@@ -50,17 +43,7 @@ in
|
||||
];
|
||||
fileSystems."/var/log".neededForBoot = true;
|
||||
|
||||
users.mutableUsers = false;
|
||||
|
||||
# rollback results in sudo lectures after each reboot
|
||||
security.sudo.extraConfig = ''
|
||||
Defaults lecture = never
|
||||
'';
|
||||
|
||||
# needed for allowOther in the home-manager impermanence config
|
||||
programs.fuse.userAllowOther = true;
|
||||
|
||||
# reset / at each boot
|
||||
# Btrfs subvolume rollback at each boot
|
||||
# Note `lib.mkBefore` is used instead of `lib.mkAfter` here.
|
||||
boot.initrd.postDeviceCommands = pkgs.lib.mkBefore ''
|
||||
mkdir /mnt
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
# Note: unattended-encryption is NOT included by default - add it explicitly where needed
|
||||
imports = [
|
||||
./impermanence.nix
|
||||
./resource-limits.nix
|
||||
./sshd.nix
|
||||
./user-ppetru.nix
|
||||
./systemd-boot.nix
|
||||
|
||||
32
common/netconsole-receiver.nix
Normal file
@@ -0,0 +1,32 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{
  options.services.netconsoleReceiver = {
    enable = lib.mkEnableOption "netconsole UDP receiver";
    port = lib.mkOption {
      type = lib.types.port;
      default = 6666;
      description = "UDP port to listen on for netconsole messages";
    };
  };

  config = lib.mkIf config.services.netconsoleReceiver.enable {
    systemd.services.netconsole-receiver = {
      description = "Netconsole UDP receiver";
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" ];
      serviceConfig = {
        ExecStart = "${pkgs.socat}/bin/socat -u UDP-LISTEN:${toString config.services.netconsoleReceiver.port},fork STDOUT";
        StandardOutput = "journal";
        StandardError = "journal";
        SyslogIdentifier = "netconsole";
        Restart = "always";
        RestartSec = "5s";
      };
    };
  };
}
|
||||
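Enabling the receiver on a host is then a small sketch against the options defined above (which host runs it is a deployment choice, not prescribed here); messages land in the journal under the `netconsole` identifier, so `journalctl -t netconsole -f` follows them:

```nix
{
  imports = [ ../../common/netconsole-receiver.nix ];
  services.netconsoleReceiver.enable = true;
  # services.netconsoleReceiver.port = 6666; # default shown above
}
```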
@@ -9,12 +9,17 @@
|
||||
# The mount is established at boot time and persists - no auto-unmount.
|
||||
# This prevents issues with Docker bind mounts seeing empty automount stubs.
|
||||
|
||||
imports = [
|
||||
./wait-for-dns-ready.nix
|
||||
];
|
||||
|
||||
fileSystems."/data/services" = {
|
||||
device = "data-services.service.consul:/persist/services";
|
||||
fsType = "nfs";
|
||||
options = [
|
||||
"nofail" # Don't block boot if mount fails
|
||||
"x-systemd.mount-timeout=30s" # Timeout for mount attempts
|
||||
"x-systemd.after=wait-for-dns-ready.service" # Wait for DNS to actually work
|
||||
"_netdev" # Network filesystem (wait for network)
|
||||
];
|
||||
};
|
||||
|
||||
@@ -24,7 +24,7 @@ in
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
# Persist root SSH directory for replication key
|
||||
environment.persistence."/persist" = {
|
||||
environment.persistence.${config.custom.impermanence.persistPath} = {
|
||||
directories = [
|
||||
"/root/.ssh"
|
||||
];
|
||||
@@ -103,11 +103,14 @@ in
|
||||
] ++ (lib.forEach cfg.standbys (standby: {
|
||||
"replicate-services-to-${standby}" = {
|
||||
description = "Replicate /persist/services to ${standby}";
|
||||
path = [ pkgs.btrfs-progs pkgs.openssh pkgs.coreutils pkgs.findutils pkgs.gnugrep ];
|
||||
path = [ pkgs.btrfs-progs pkgs.openssh pkgs.coreutils pkgs.findutils pkgs.gnugrep pkgs.curl ];
|
||||
|
||||
script = ''
|
||||
set -euo pipefail
|
||||
|
||||
START_TIME=$(date +%s)
|
||||
REPLICATION_SUCCESS=0
|
||||
|
||||
SSH_KEY="/persist/root/.ssh/btrfs-replication"
|
||||
if [ ! -f "$SSH_KEY" ]; then
|
||||
echo "ERROR: SSH key not found at $SSH_KEY"
|
||||
@@ -130,15 +133,19 @@ in
|
||||
echo "Attempting incremental send from $(basename $PREV_LOCAL) to ${standby}"
|
||||
|
||||
# Try incremental send, if it fails (e.g., parent missing on receiver), fall back to full
|
||||
if btrfs send -p "$PREV_LOCAL" "$SNAPSHOT_PATH" | \
|
||||
# Use -c to help with broken Received UUID chains
|
||||
if btrfs send -p "$PREV_LOCAL" -c "$PREV_LOCAL" "$SNAPSHOT_PATH" | \
|
||||
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
|
||||
"btrfs receive /persist/services-standby"; then
|
||||
echo "Incremental send completed successfully"
|
||||
REPLICATION_SUCCESS=1
|
||||
else
|
||||
echo "Incremental send failed (likely missing parent on receiver), falling back to full send"
|
||||
# Plain full send without clone source (receiver may have no snapshots)
|
||||
btrfs send "$SNAPSHOT_PATH" | \
|
||||
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
|
||||
"btrfs receive /persist/services-standby"
|
||||
REPLICATION_SUCCESS=1
|
||||
fi
|
||||
else
|
||||
# First snapshot, do full send
|
||||
@@ -146,10 +153,28 @@ in
|
||||
btrfs send "$SNAPSHOT_PATH" | \
|
||||
ssh -i "$SSH_KEY" -o StrictHostKeyChecking=accept-new root@${standby} \
|
||||
"btrfs receive /persist/services-standby"
|
||||
REPLICATION_SUCCESS=1
|
||||
fi
|
||||
|
||||
# Cleanup old snapshots on sender (keep last 24 hours = 288 snapshots at 5min intervals)
|
||||
find /persist -maxdepth 1 -name 'services@*' -mmin +1440 -exec btrfs subvolume delete {} \;
|
||||
# Cleanup old snapshots on sender (keep last 10 snapshots, sorted by name/timestamp)
|
||||
ls -1d /persist/services@* 2>/dev/null | sort | head -n -10 | xargs -r btrfs subvolume delete
|
||||
|
||||
# Calculate metrics
|
||||
END_TIME=$(date +%s)
|
||||
DURATION=$((END_TIME - START_TIME))
|
||||
SNAPSHOT_COUNT=$(ls -1d /persist/services@* 2>/dev/null | wc -l)
|
||||
|
||||
# Push metrics to Prometheus pushgateway
|
||||
cat <<METRICS | curl -s --data-binary @- http://pushgateway.service.consul:9091/metrics/job/nfs_replication/instance/${standby} || true
|
||||
# TYPE nfs_replication_last_success_timestamp gauge
|
||||
nfs_replication_last_success_timestamp $END_TIME
|
||||
# TYPE nfs_replication_duration_seconds gauge
|
||||
nfs_replication_duration_seconds $DURATION
|
||||
# TYPE nfs_replication_snapshot_count gauge
|
||||
nfs_replication_snapshot_count $SNAPSHOT_COUNT
|
||||
# TYPE nfs_replication_success gauge
|
||||
nfs_replication_success $REPLICATION_SUCCESS
|
||||
METRICS
|
||||
'';
|
||||
|
||||
serviceConfig = {
|
||||
|
||||
@@ -39,17 +39,28 @@ in
|
||||
noCheck = true;
|
||||
};
|
||||
|
||||
# Cleanup old snapshots on standby (keep last 48 hours for safety)
|
||||
# Cleanup old snapshots on standby (keep last 10 snapshots)
|
||||
systemd.services.cleanup-services-standby-snapshots = {
|
||||
description = "Cleanup old btrfs snapshots in services-standby";
|
||||
path = [ pkgs.btrfs-progs pkgs.findutils ];
|
||||
|
||||
path = [ pkgs.btrfs-progs pkgs.findutils pkgs.coreutils pkgs.curl ];
|
||||
script = ''
|
||||
set -euo pipefail
|
||||
# Keep last 48 hours of snapshots (576 snapshots at 5min intervals)
|
||||
find /persist/services-standby -maxdepth 1 -name 'services@*' -mmin +2880 -exec btrfs subvolume delete {} \; || true
|
||||
'';
|
||||
|
||||
# Cleanup old snapshots on standby (keep last 10 snapshots, sorted by name/timestamp)
|
||||
ls -1d /persist/services-standby/services@* 2>/dev/null | sort | head -n -10 | xargs -r btrfs subvolume delete || true
|
||||
|
||||
# Calculate metrics
|
||||
CLEANUP_TIME=$(date +%s)
|
||||
SNAPSHOT_COUNT=$(ls -1d /persist/services-standby/services@* 2>/dev/null | wc -l)
|
||||
|
||||
# Push metrics to Prometheus pushgateway
|
||||
cat <<METRICS | curl -s --data-binary @- http://pushgateway.service.consul:9091/metrics/job/nfs_standby_cleanup/instance/$(hostname) || true
|
||||
# TYPE nfs_standby_snapshot_count gauge
|
||||
nfs_standby_snapshot_count $SNAPSHOT_COUNT
|
||||
# TYPE nfs_standby_cleanup_last_run_timestamp gauge
|
||||
nfs_standby_cleanup_last_run_timestamp $CLEANUP_TIME
|
||||
METRICS
|
||||
'';
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
@@ -60,7 +71,7 @@ in
|
||||
description = "Timer for cleaning up old snapshots on standby";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "daily";
|
||||
OnCalendar = "hourly";
|
||||
Persistent = true;
|
||||
};
|
||||
};
|
||||
|
||||
9
common/nomad-server.nix
Normal file
@@ -0,0 +1,9 @@
{ ... }:
{
  # Enable server mode for both Consul and Nomad
  # Used by: c1, c2, c3 (quorum members)
  clusterRole = {
    consulServer = true;
    nomadServer = true;
  };
}
9
common/nomad-worker.nix
Normal file
@@ -0,0 +1,9 @@
{ ... }:
{
  # Enable Nomad client to run workloads
  # Includes: Nomad client, Docker plugin, host volumes, NFS mount dependencies
  # Used by: c1, c2, c3, zippy (all nodes that run Nomad jobs)
  imports = [
    ./nomad.nix
  ];
}
299
common/nomad.nix
@@ -1,174 +1,177 @@
|
||||
# inspiration: https://github.com/astro/skyflake/blob/main/nixos-modules/nomad.nix
|
||||
{ pkgs, config, ... }:
|
||||
{ pkgs, config, lib, ... }:
|
||||
let
|
||||
servers = [
|
||||
"c1"
|
||||
"c2"
|
||||
"c3"
|
||||
];
|
||||
server_enabled = builtins.elem config.networking.hostName servers;
|
||||
in
|
||||
{
|
||||
services.nomad = {
|
||||
enable = true;
|
||||
# true breaks at least CSI volumes
|
||||
# TODO: consider fixing
|
||||
dropPrivileges = false;
|
||||
options.clusterRole.nomadServer = lib.mkEnableOption "Nomad server mode";
|
||||
|
||||
settings = {
|
||||
datacenter = "alo";
|
||||
config = {
|
||||
services.nomad = {
|
||||
enable = true;
|
||||
# true breaks at least CSI volumes
|
||||
# TODO: consider fixing
|
||||
dropPrivileges = false;
|
||||
|
||||
client = {
|
||||
enabled = true;
|
||||
server_join.retry_join = servers;
|
||||
host_network.tailscale = {
|
||||
interface = "tailscale0";
|
||||
cidr = "100.64.0.0/10";
|
||||
settings = {
|
||||
datacenter = "alo";
|
||||
|
||||
client = {
|
||||
enabled = true;
|
||||
server_join.retry_join = servers;
|
||||
host_network.tailscale = {
|
||||
interface = "tailscale0";
|
||||
cidr = "100.64.0.0/10";
|
||||
};
|
||||
host_volume = {
|
||||
services = {
|
||||
path = "/data/services";
|
||||
read_only = false;
|
||||
};
|
||||
nix-store = {
|
||||
path = "/nix/store";
|
||||
read_only = true;
|
||||
};
|
||||
sw = {
|
||||
path = "/run/current-system/sw";
|
||||
read_only = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
host_volume = {
|
||||
services = {
|
||||
path = "/data/services";
|
||||
read_only = false;
|
||||
};
|
||||
nix-store = {
|
||||
path = "/nix/store";
|
||||
read_only = true;
|
||||
};
|
||||
sw = {
|
||||
path = "/run/current-system/sw";
|
||||
read_only = true;
|
||||
};
|
||||
|
||||
server = {
|
||||
enabled = config.clusterRole.nomadServer;
|
||||
bootstrap_expect = (builtins.length servers + 2) / 2;
|
||||
server_join.retry_join = servers;
|
||||
};
|
||||
|
||||
telemetry = {
|
||||
collection_interval = "1s";
|
||||
disable_hostname = true;
|
||||
prometheus_metrics = true;
|
||||
publish_allocation_metrics = true;
|
||||
publish_node_metrics = true;
|
||||
};
|
||||
};
|
||||
|
||||
server = {
|
||||
enabled = server_enabled;
|
||||
bootstrap_expect = (builtins.length servers + 2) / 2;
|
||||
server_join.retry_join = servers;
|
||||
};
|
||||
extraSettingsPaths = [ "/etc/nomad-alo.json" ];
|
||||
};
|
||||
|
||||
telemetry = {
|
||||
collection_interval = "1s";
|
||||
disable_hostname = true;
|
||||
prometheus_metrics = true;
|
||||
publish_allocation_metrics = true;
|
||||
publish_node_metrics = true;
|
||||
# NFS mount dependency configuration for Nomad:
|
||||
#
|
||||
# Problem: Docker bind mounts need the real NFS mount, not an empty stub.
|
||||
# If Nomad starts before NFS is mounted, containers get empty directories.
|
||||
#
|
||||
# Solution: Use soft dependencies (wants/after) with health-checking recovery.
|
||||
# - wants: Nomad wants the mount, but won't be killed if it goes away
|
||||
# - after: Nomad waits for mount to be attempted before starting
|
||||
# - ExecStartPre with findmnt: Blocks Nomad start until mount is actually active
|
||||
#
|
||||
# This prevents Docker race conditions while allowing:
|
||||
# - Boot to proceed if NFS unavailable (Nomad fails to start, systemd retries)
|
||||
# - Nomad to keep running if NFS temporarily fails (containers may error)
|
||||
# - Recovery service to auto-restart Nomad when NFS comes back or becomes stale
|
||||
#
|
||||
# Note: Mount uses Consul DNS which resolves at mount time. If NFS server
|
||||
# moves to different IP, mount becomes stale and needs remount.
|
||||
# The recovery service handles this by detecting stale mounts and restarting Nomad.
|
||||
systemd.services.nomad = {
|
||||
wants = [ "network-online.target" "data-services.mount" ];
|
||||
after = [ "data-services.mount" ];
|
||||
serviceConfig.ExecStartPre = "${pkgs.util-linux}/bin/findmnt --mountpoint /data/services";
|
||||
};
|
||||
|
||||
# Recovery service: automatically restart Nomad when NFS mount needs attention
|
||||
# This handles scenarios where:
|
||||
# - NFS server was down during boot (mount failed, Nomad hit start-limit)
|
||||
# - NFS server failed over to different host with new IP (mount went stale)
|
||||
# - Network outage temporarily broke the mount
|
||||
#
|
||||
# The timer runs every 30s and checks:
|
||||
# 1. Is mount healthy (exists and accessible)?
|
||||
# 2. If mount is stale/inaccessible → restart Nomad (triggers remount)
|
||||
# 3. If mount is healthy but Nomad failed → restart Nomad (normal recovery)
|
||||
systemd.services.nomad-mount-watcher = {
|
||||
description = "Restart Nomad when NFS mount needs attention";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = pkgs.writeShellScript "nomad-mount-watcher" ''
|
||||
# Check if mount point exists
|
||||
if ! ${pkgs.util-linux}/bin/findmnt --mountpoint /data/services >/dev/null 2>&1; then
|
||||
exit 0 # Mount not present, nothing to do
|
||||
fi
|
||||
|
||||
# Check if mount is actually accessible (not stale)
|
||||
# Use timeout to avoid hanging on stale NFS mounts
|
||||
if ! ${pkgs.coreutils}/bin/timeout 5s ${pkgs.coreutils}/bin/stat /data/services >/dev/null 2>&1; then
|
||||
echo "NFS mount is stale or inaccessible. Restarting Nomad to trigger remount..."
|
||||
${pkgs.systemd}/bin/systemctl restart nomad.service
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Mount is healthy - check if Nomad needs recovery
|
||||
if ${pkgs.systemd}/bin/systemctl is-failed nomad.service >/dev/null 2>&1; then
|
||||
echo "NFS mount is healthy but Nomad is failed. Restarting Nomad..."
|
||||
${pkgs.systemd}/bin/systemctl restart nomad.service
|
||||
fi
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
extraSettingsPaths = [ "/etc/nomad-alo.json" ];
|
||||
};
|
||||
|
||||
# NFS mount dependency configuration for Nomad:
|
||||
#
|
||||
# Problem: Docker bind mounts need the real NFS mount, not an empty stub.
|
||||
# If Nomad starts before NFS is mounted, containers get empty directories.
|
||||
#
|
||||
# Solution: Use soft dependencies (wants/after) with health-checking recovery.
|
||||
# - wants: Nomad wants the mount, but won't be killed if it goes away
|
||||
# - after: Nomad waits for mount to be attempted before starting
|
||||
# - ExecStartPre with findmnt: Blocks Nomad start until mount is actually active
|
||||
#
|
||||
# This prevents Docker race conditions while allowing:
|
||||
# - Boot to proceed if NFS unavailable (Nomad fails to start, systemd retries)
|
||||
# - Nomad to keep running if NFS temporarily fails (containers may error)
|
||||
# - Recovery service to auto-restart Nomad when NFS comes back or becomes stale
|
||||
#
|
||||
# Note: Mount uses Consul DNS which resolves at mount time. If NFS server
|
||||
# moves to different IP, mount becomes stale and needs remount.
|
||||
# The recovery service handles this by detecting stale mounts and restarting Nomad.
|
||||
systemd.services.nomad = {
|
||||
wants = [ "network-online.target" "data-services.mount" ];
|
||||
after = [ "data-services.mount" ];
|
||||
serviceConfig.ExecStartPre = "${pkgs.util-linux}/bin/findmnt --mountpoint /data/services";
|
||||
};
|
||||
|
||||
# Recovery service: automatically restart Nomad when NFS mount needs attention
|
||||
# This handles scenarios where:
|
||||
# - NFS server was down during boot (mount failed, Nomad hit start-limit)
|
||||
# - NFS server failed over to different host with new IP (mount went stale)
|
||||
# - Network outage temporarily broke the mount
|
||||
#
|
||||
# The timer runs every 30s and checks:
|
||||
# 1. Is mount healthy (exists and accessible)?
|
||||
# 2. If mount is stale/inaccessible → restart Nomad (triggers remount)
|
||||
# 3. If mount is healthy but Nomad failed → restart Nomad (normal recovery)
|
||||
systemd.services.nomad-mount-watcher = {
|
||||
description = "Restart Nomad when NFS mount needs attention";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = pkgs.writeShellScript "nomad-mount-watcher" ''
|
||||
# Check if mount point exists
|
||||
          if ! ${pkgs.util-linux}/bin/findmnt --mountpoint /data/services >/dev/null 2>&1; then
            exit 0 # Mount not present, nothing to do
          fi

          # Check if mount is actually accessible (not stale)
          # Use timeout to avoid hanging on stale NFS mounts
          if ! ${pkgs.coreutils}/bin/timeout 5s ${pkgs.coreutils}/bin/stat /data/services >/dev/null 2>&1; then
            echo "NFS mount is stale or inaccessible. Restarting Nomad to trigger remount..."
            ${pkgs.systemd}/bin/systemctl restart nomad.service
            exit 0
          fi

          # Mount is healthy - check if Nomad needs recovery
          if ${pkgs.systemd}/bin/systemctl is-failed nomad.service >/dev/null 2>&1; then
            echo "NFS mount is healthy but Nomad is failed. Restarting Nomad..."
            ${pkgs.systemd}/bin/systemctl restart nomad.service
          fi
        '';
      };
    };

    systemd.timers.nomad-mount-watcher = {
      description = "Timer for Nomad mount watcher";
      wantedBy = [ "timers.target" ];
      timerConfig = {
        OnBootSec = "1min"; # First run 1min after boot
        OnUnitActiveSec = "30s"; # Then every 30s
        Unit = "nomad-mount-watcher.service";
      };
    };

    environment.etc."nomad-alo.json".text = builtins.toJSON {
      plugin.docker.config = {
        allow_privileged = true;
        # for keepalived, though only really needing "NET_ADMIN","NET_BROADCAST","NET_RAW" on top of default
        # TODO: trim this down
        allow_caps = [ "all" ];
        volumes.enabled = true;
        extra_labels = [
          "job_name"
          "task_group_name"
          "task_name"
          "node_name"
        ];
      };

      plugin.raw_exec.config.enabled = true;
    };

    environment.persistence.${config.custom.impermanence.persistPath}.directories = [
      "/var/lib/docker"
      "/var/lib/nomad"
    ];

    environment.systemPackages = with pkgs; [
      nomad
      wander
      damon
    ];

    networking.firewall = {
      allowedTCPPorts =
        if config.clusterRole.nomadServer then
          [
            4646
            4647
            4648
          ]
        else
          [ 4646 ];
      allowedUDPPorts = if config.clusterRole.nomadServer then [ 4648 ] else [ ];
    };
  };
}
44 common/resource-limits.nix Normal file
@@ -0,0 +1,44 @@
{ ... }:
{
  # Resource limits for user sessions to prevent system wedging
  #
  # Modern systemd/cgroups v2 approach to resource control (replaces ulimits).
  # Limits apply to all user sessions (SSH, GUI, etc.) but NOT to system services.
  #
  # Rationale:
  # - Prevents runaway user processes (nix builds, compiles, etc.) from consuming
  #   all resources and making the system unresponsive
  # - System services (Nomad jobs, Consul, NFS, etc.) run outside user.slice and
  #   are unaffected by these limits
  # - Ensures SSH access remains responsive even under heavy load
  #
  # CPU: Uses CPUWeight (not CPUQuota) so user sessions can use 100% when idle,
  #      but system services get priority (1.25x) during contention
  # Memory: Soft limit at 90% (triggers pressure/reclaim), hard limit at 95%
  #         Gives 5% warning buffer before OOM kills

  systemd.slices.user = {
    sliceConfig = {
      # CPU weight: 80 vs default 100 for system services
      # When idle: user sessions use all available CPU
      # Under contention: system services get 1.25x CPU share
      CPUWeight = "80";

      # Memory soft limit: triggers reclaim and memory pressure
      # User will notice slowdown but processes keep running
      MemoryHigh = "90%";

      # Memory hard limit: OOM killer targets user.slice
      # 5% buffer between MemoryHigh and MemoryMax provides warning
      MemoryMax = "95%";

      # Limit number of tasks (processes/threads)
      # Prevents fork bombs while still allowing nix builds
      TasksMax = "4096";

      # Lower I/O priority slightly
      # System services get preference during I/O contention
      IOWeight = "90";
    };
  };
}
@@ -1,14 +0,0 @@
{ pkgs, ... }:
{
  # Server profile: Cluster-enabled system for server deployments
  # Extends cluster-node with server-specific configurations
  # Future: Add bare NixOS services here (mysql, postgres, etc.) when migrating from Nomad
  imports = [
    ./cluster-node.nix
  ];

  # Server-specific configurations can be added here
  # Example (for future use):
  # services.mysql.enable = lib.mkDefault false;
  # services.postgresql.enable = lib.mkDefault false;
}
@@ -3,8 +3,6 @@
    enable = true;
    allowSFTP = true;
    settings = {
      PasswordAuthentication = false;
      KbdInteractiveAuthentication = false;
      PermitRootLogin = "prohibit-password"; # Allow root login with SSH keys only
    };
  };

@@ -1,7 +1,9 @@
{ pkgs, lib, ... }:
{
  boot.loader.systemd-boot = {
    enable = true;
    configurationLimit = 5;
    memtest86.enable = lib.mkIf (pkgs.stdenv.hostPlatform.system == "x86_64-linux") true;
  };
  boot.loader.efi.canTouchEfiVariables = true;
}

@@ -15,8 +15,9 @@
|
||||
openssh.authorizedKeys.keys = [
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCdZ9dHN+DamoyRAIS8v7Ph85KyJ9zYdgwoqkp7F+smEJEdDKboHE5LA49IDQk4cgkR5xNEMtxANpJm+AXNAhQOPVl/w57vI/Z+TBtSvDoj8LuAvKjmmrPfok2iyD2IIlbctcw8ypn1revZwDb1rBFefpbbZdr5h+75tVqqmNebzxk6UQsfL++lU8HscWwYKzxrrom5aJL6wxNTfy7/Htkt4FHzoKAc5gcB2KM/q0s6NvZzX9WtdHHwAR1kib2EekssjDM9VLecX75Xhtbp+LrHOJKRnxbIanXos4UZUzaJctdNTcOYzEVLvV0BCYaktbI+uVvJcC0qo28bXbHdS3rTGRu8CsykFneJXnrrRIJw7mYWhJSTV9bf+6j/lnFNAurbiYmd4SzaTgbGjj2j38Gr/CTsyv8Rho7P3QUWbRRZnn4a7eVPtjGagqwIwS59YDxRcOy2Wdsw35ry/N2G802V7Cr3hUqeaAIev2adtn4FaG72C8enacYUeACPEhi7TYdsDzuuyt31W7AQa5Te4Uda20rTa0Y9N5Lw85uGB2ebbdYWlO2CqI/m+xNYcPkKqL7zZILz782jDw1sxWd/RUbEgJNrWjsKZ7ybiEMmhpw5vLiMGOeqQWIT6cBCNjocmW0ocU+FBLhhioyrvuZOyacoEZLoklatsL0DMkvvkbT0Ew== petru@paler.net"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH+QbeQG/gTPJ2sIMPgZ3ZPEirVo5qX/carbZMKt50YN petru@happy"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIwBGVVoiKh/5/j9Z0ITvResWy+ZuB1afFUkkP/VZ9O ppetru@sparky"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOOQ2EcJ+T+7BItZl89oDYhq7ZW4B9KuQVCy2DuQaPKR ppetru@sparky"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFRYVOfrqk2nFSyiu7TzU23ql8D6TfXICFpMIEvPbNsc JuiceSSH"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINBIqK6+aPIbmviJPWP8PI/k8GmaC7RO8v2ENnsK8sJx ppetru@beefy"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
55 common/wait-for-dns-ready.nix Normal file
@@ -0,0 +1,55 @@
{ pkgs, ... }:
{
  # Service to wait for DNS resolution to be actually functional
  # This is needed because network-online.target and wait-online.service
  # don't guarantee DNS works - they only check that interfaces are configured.
  #
  # Problem: NFS mounts using Consul DNS names (data-services.service.consul)
  # fail at boot because DNS resolution isn't ready even though network is "online"
  #
  # Solution: Actively test DNS resolution before considering network truly ready

  systemd.services.wait-for-dns-ready = {
    description = "Wait for DNS resolution to be functional";
    after = [
      "systemd-networkd-wait-online.service"
      "systemd-resolved.service"
      "network-online.target"
    ];
    wants = [ "network-online.target" ];
    wantedBy = [ "multi-user.target" ];

    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      ExecStart = pkgs.writeShellScript "wait-for-dns-ready" ''
        # Test DNS resolution by attempting to resolve data-services.service.consul
        # This ensures the full DNS path works: interface → gateway → Consul DNS

        echo "Waiting for DNS resolution to be ready..."

        for i in {1..30}; do
          # Use getent which respects /etc/nsswitch.conf and systemd-resolved
          if ${pkgs.glibc.bin}/bin/getent hosts data-services.service.consul >/dev/null 2>&1; then
            echo "DNS ready: data-services.service.consul resolved successfully"
            exit 0
          fi

          # Also test a public DNS name to distinguish between general DNS failure
          # vs Consul-specific issues (helpful for debugging)
          if ! ${pkgs.glibc.bin}/bin/getent hosts www.google.com >/dev/null 2>&1; then
            echo "Attempt $i/30: General DNS not working yet, waiting..."
          else
            echo "Attempt $i/30: General DNS works but Consul DNS not ready yet, waiting..."
          fi

          sleep 1
        done

        echo "Warning: DNS not fully ready after 30 seconds"
        echo "NFS mounts with 'nofail' option will handle this gracefully"
        exit 0 # Don't block boot - let nofail mounts handle DNS failures
      '';
    };
  };
}
35 common/wifi.nix Normal file
@@ -0,0 +1,35 @@
{ config, lib, ... }:
{
  sops.secrets.wifi-password-pi = {
    sopsFile = ./../secrets/wifi.yaml;
  };

  networking.wireless = {
    enable = true;
    secretsFile = config.sops.secrets.wifi-password-pi.path;
    networks = {
      "pi" = {
        pskRaw = "ext:pi";
      };
    };
    # Only enable on wireless interface, not ethernet
    interfaces = [ "wlan0" ];
  };

  # Prefer wifi over ethernet, but keep ethernet as fallback
  networking.dhcpcd.extraConfig = ''
    # Prefer wlan0 over ethernet interfaces
    interface wlan0
    metric 100

    interface eth0
    metric 200
  '';

  # Persist wireless configuration across reboots (for impermanence)
  environment.persistence.${config.custom.impermanence.persistPath} = {
    files = [
      "/etc/wpa_supplicant.conf"
    ];
  };
}
@@ -1,9 +1,9 @@
{ pkgs, inputs, ... }:
{
  # Workstation profile: Development workstation configuration
  # Extends server-node with development tools and emulation
  # Adds development tools and emulation on top of minimal-node
  imports = [
    ./server-node.nix
    ./minimal-node.nix
    ./unattended-encryption.nix
  ];

55 docs/AUTH_SETUP.md Normal file
@@ -0,0 +1,55 @@
|
||||
# Authentication Setup
|
||||
|
||||
SSO for homelab services using OIDC.
|
||||
|
||||
## Architecture
|
||||
|
||||
**Pocket ID** (`pocket-id.v.paler.net`) - Lightweight OIDC provider, data in `/data/services/pocket-id`
|
||||
|
||||
**Traefik** - Uses `traefik-oidc-auth` plugin (v0.16.0) to protect services
|
||||
- Plugin downloaded from GitHub at startup, cached in `/data/services/traefik/plugins-storage`
|
||||
- Middleware config in `/data/services/traefik/rules/middlewares.yml`
|
||||
- Protected services add tag: `traefik.http.routers.<name>.middlewares=oidc-auth@file`
|
||||
|
||||
## Flow
|
||||
|
||||
1. User hits protected service → Traefik intercepts
|
||||
2. Redirects to Pocket ID for login
|
||||
3. Pocket ID returns OIDC token
|
||||
4. Traefik validates and forwards with `X-Oidc-Username` header
|
||||
|
||||
## Protected Services
|
||||
|
||||
Use `oidc-auth@file` middleware (grep codebase for full list):
|
||||
- Wikis (TiddlyWiki instances)
|
||||
- Media stack (Radarr, Sonarr, Plex, etc.)
|
||||
- Infrastructure (Traefik dashboard, Loki, Jupyter, Unifi)
|
||||
|
||||
## Key Files
|
||||
|
||||
- `services/pocket-id.hcl` - OIDC provider
|
||||
- `services/traefik.hcl` - Plugin declaration
|
||||
- `/data/services/traefik/rules/middlewares.yml` - Middleware definitions (oidc-auth, simple-auth fallback)
|
||||
|
||||
## Cold Start Notes
|
||||
|
||||
- Traefik needs internet to download plugin on first start
|
||||
- Pocket ID needs `/data/services` NFS mounted
|
||||
- Pocket ID down = all protected services inaccessible
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Infinite redirects**: Check `TRUST_PROXY=true` on Pocket ID
|
||||
|
||||
**Plugin not loading**: Clear cache in `/data/services/traefik/plugins-storage/`, restart Traefik
|
||||
|
||||
**401 after login**: Verify client ID/secret in middlewares.yml matches Pocket ID client config
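
A quick CLI sanity check for all of the above is whether Traefik actually issues the OIDC redirect. The hostname below is only an example; substitute any service behind `oidc-auth@file`:

```bash
# Unauthenticated request to a protected service should redirect to Pocket ID
curl -sI https://sonarr.v.paler.net | head -n 5
# Expect a 3xx with a Location header pointing at pocket-id.v.paler.net
```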
|
||||
|
||||
## Migration History
|
||||
|
||||
- Previous: Authentik with forwardAuth (removed Nov 2024)
|
||||
- Current: Pocket ID + traefik-oidc-auth (simpler, lighter)
|
||||
|
||||
---
|
||||
|
||||
*Manage users/clients via Pocket ID UI. Basic auth fallback available via `simple-auth` middleware.*
|
||||
206 docs/CICD_SETUP.md Normal file
@@ -0,0 +1,206 @@
|
||||
# CI/CD Setup for Nomad Services
|
||||
|
||||
Guide for adding automated builds and deployments to a service.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### 1. Service Repository
|
||||
|
||||
Your service needs a `flake.nix` that exports a Docker image:
|
||||
|
||||
```nix
|
||||
{
|
||||
outputs = { self, nixpkgs, ... }: {
|
||||
# The workflow looks for this output by default
|
||||
dockerImage = pkgs.dockerTools.buildImage {
|
||||
name = "gitea.v.paler.net/alo/<service>";
|
||||
tag = "latest";
|
||||
# ... image config
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
**Important**: Use `extraCommands` instead of `runAsRoot` in your Docker build - the CI runner doesn't have KVM.
|
||||
|
||||
### 2. Nomad Job
|
||||
|
||||
Your job in `services/<name>.hcl` needs:
|
||||
|
||||
```hcl
|
||||
job "<service>" {
|
||||
# Required: UUID changes trigger deployments
|
||||
meta {
|
||||
uuid = uuidv4()
|
||||
}
|
||||
|
||||
# Required: enables deployment tracking and auto-rollback
|
||||
update {
|
||||
max_parallel = 1
|
||||
health_check = "checks"
|
||||
min_healthy_time = "30s"
|
||||
healthy_deadline = "5m"
|
||||
auto_revert = true
|
||||
}
|
||||
|
||||
# Required: pulls new image on each deployment
|
||||
task "app" {
|
||||
config {
|
||||
force_pull = true
|
||||
}
|
||||
|
||||
# Recommended: health check for deployment validation
|
||||
service {
|
||||
check {
|
||||
type = "http"
|
||||
path = "/healthz"
|
||||
interval = "10s"
|
||||
timeout = "5s"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Create Workflow
|
||||
|
||||
Add `.gitea/workflows/deploy.yaml` to your service repo:
|
||||
|
||||
```yaml
|
||||
name: Deploy
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
uses: alo/alo-cluster/.gitea/workflows/deploy-nomad.yaml@master
|
||||
with:
|
||||
service_name: <your-service> # Must match Nomad job ID
|
||||
secrets: inherit
|
||||
```
|
||||
|
||||
### 2. Add Secrets
|
||||
|
||||
In Gitea → Your Repo → Settings → Actions → Secrets, add:
|
||||
|
||||
| Secret | Value |
|
||||
|--------|-------|
|
||||
| `REGISTRY_USERNAME` | Your Gitea username |
|
||||
| `REGISTRY_PASSWORD` | Gitea access token with `packages:write` |
|
||||
| `NOMAD_ADDR` | `http://nomad.service.consul:4646` |
|
||||
|
||||
### 3. Push
|
||||
|
||||
Push to `master` branch. The workflow will:
|
||||
1. Build your Docker image with Nix
|
||||
2. Push to Gitea registry
|
||||
3. Update the Nomad job to trigger deployment
|
||||
4. Monitor until deployment succeeds or fails
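
The same steps can be followed by hand with the standard Nomad CLI, which is useful when the workflow times out or you want more detail:

```bash
# Deployments for the job, newest first
nomad job deployments <service>

# Status of one deployment (re-run or `watch` it to follow progress)
nomad deployment status <deployment-id>

# Allocation-level detail if something looks unhealthy
nomad job status <service>
```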
|
||||
|
||||
## Workflow Options
|
||||
|
||||
The shared workflow accepts these inputs:
|
||||
|
||||
| Input | Default | Description |
|
||||
|-------|---------|-------------|
|
||||
| `service_name` | (required) | Nomad job ID |
|
||||
| `flake_output` | `dockerImage` | Flake output to build |
|
||||
| `registry` | `gitea.v.paler.net` | Container registry |
|
||||
|
||||
Example with custom flake output:
|
||||
|
||||
```yaml
|
||||
jobs:
|
||||
deploy:
|
||||
uses: alo/alo-cluster/.gitea/workflows/deploy-nomad.yaml@master
|
||||
with:
|
||||
service_name: myservice
|
||||
flake_output: packages.x86_64-linux.docker
|
||||
secrets: inherit
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
```
|
||||
Push to master
|
||||
↓
|
||||
Build: nix build .#dockerImage
|
||||
↓
|
||||
Push: skopeo → gitea.v.paler.net/alo/<service>:latest
|
||||
↓
|
||||
Deploy: Update job meta.uuid → Nomad creates deployment
|
||||
↓
|
||||
Monitor: Poll deployment status for up to 5 minutes
|
||||
↓
|
||||
Success: Deployment healthy
|
||||
OR
|
||||
Failure: Nomad auto-reverts to previous version
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Build fails with KVM error
|
||||
|
||||
```
|
||||
Required system: 'x86_64-linux' with features {kvm}
|
||||
```
|
||||
|
||||
Use `extraCommands` instead of `runAsRoot` in your `docker.nix`:
|
||||
|
||||
```nix
|
||||
# Bad - requires KVM
|
||||
runAsRoot = ''
|
||||
mkdir -p /tmp
|
||||
'';
|
||||
|
||||
# Good - no KVM needed
|
||||
extraCommands = ''
|
||||
mkdir -p tmp
|
||||
chmod 1777 tmp
|
||||
'';
|
||||
```
|
||||
|
||||
### No deployment created
|
||||
|
||||
Ensure your Nomad job has the `update` stanza with `auto_revert = true`.
|
||||
|
||||
### Image not updating
|
||||
|
||||
Check that `force_pull = true` is set in the Nomad job's Docker config.
|
||||
|
||||
### Deployment fails health checks
|
||||
|
||||
- Check your `/healthz` endpoint works
|
||||
- Increase `healthy_deadline` if startup is slow
|
||||
- Check `nomad alloc logs <alloc-id>` for errors
|
||||
|
||||
### Workflow can't access alo-cluster
|
||||
|
||||
If Gitea can't pull the reusable workflow, you may need to make alo-cluster public or use a token. As a fallback, copy the workflow content directly.
|
||||
|
||||
## Manual Deployment
|
||||
|
||||
If CI fails, you can deploy manually:
|
||||
|
||||
```bash
|
||||
cd <service-repo>
|
||||
nix build .#dockerImage
|
||||
skopeo copy --dest-authfile ~/.docker/config.json \
|
||||
docker-archive:result \
|
||||
docker://gitea.v.paler.net/alo/<service>:latest
|
||||
nomad run /path/to/alo-cluster/services/<service>.hcl
|
||||
```
|
||||
|
||||
## Rollback
|
||||
|
||||
Nomad auto-reverts on health check failure. For manual rollback:
|
||||
|
||||
```bash
|
||||
nomad job history <service> # List versions
|
||||
nomad job revert <service> <version> # Revert to specific version
|
||||
```
|
||||
288 docs/DIFF_CONFIGS.md Normal file
@@ -0,0 +1,288 @@
|
||||
# Configuration Diff Tool
|
||||
|
||||
Tool to compare all NixOS host configurations between current working tree and HEAD commit.
|
||||
|
||||
## Purpose
|
||||
|
||||
Before committing changes (especially refactors), verify that you haven't accidentally broken existing host configurations. This tool:
|
||||
- Builds all host configurations in current state (with uncommitted changes)
|
||||
- Builds all host configurations at HEAD (last commit)
|
||||
- Uses `nvd` to show readable diffs for each host
|
||||
- Highlights which hosts changed and which didn't
|
||||
|
||||
## Usage
|
||||
|
||||
### Prerequisites
|
||||
|
||||
The script requires `nvd` to be in PATH. Use either:
|
||||
|
||||
**Option 1: direnv (recommended)**
|
||||
```bash
|
||||
# Allow direnv in the repository (one-time setup)
|
||||
direnv allow
|
||||
|
||||
# direnv will automatically load the dev shell when you cd into the directory
|
||||
cd /home/ppetru/projects/alo-cluster
|
||||
# nvd is now in PATH
|
||||
```
|
||||
|
||||
**Option 2: nix develop**
|
||||
```bash
|
||||
# Enter dev shell manually
|
||||
nix develop
|
||||
|
||||
# Now run the script
|
||||
./scripts/diff-configs.sh
|
||||
```
|
||||
|
||||
### Quick Start
|
||||
|
||||
```bash
|
||||
# Compare all hosts (summary)
|
||||
./scripts/diff-configs.sh
|
||||
|
||||
# Compare with detailed path listing
|
||||
./scripts/diff-configs.sh -v c1
|
||||
|
||||
# Compare with content diffs of changed files (deep mode)
|
||||
./scripts/diff-configs.sh --deep c1
|
||||
|
||||
# Compare only x86_64 hosts (avoid slow ARM cross-compilation)
|
||||
./scripts/diff-configs.sh c1 c2 c3 zippy chilly sparky
|
||||
|
||||
# Verbose mode with multiple hosts
|
||||
./scripts/diff-configs.sh --verbose c1 c2 c3
|
||||
|
||||
# Via flake app
|
||||
nix run .#diff-configs
|
||||
|
||||
# Show help
|
||||
./scripts/diff-configs.sh --help
|
||||
```
|
||||
|
||||
### Typical Workflow
|
||||
|
||||
```bash
|
||||
# 1. Make changes to configurations
|
||||
vim common/impermanence.nix
|
||||
|
||||
# 2. Stage changes (required for flake to see them)
|
||||
git add common/impermanence.nix
|
||||
|
||||
# 3. Check what would change if you committed now
|
||||
# For quick feedback, compare only x86_64 hosts first:
|
||||
./scripts/diff-configs.sh c1 c2 c3 zippy chilly sparky
|
||||
|
||||
# 4. Review output, make adjustments if needed
|
||||
|
||||
# 5. If changes look good and affect ARM hosts, check those too:
|
||||
./scripts/diff-configs.sh stinky alo-cloud-1
|
||||
|
||||
# 6. Commit when satisfied
|
||||
git commit -m "Refactor impermanence config"
|
||||
```
|
||||
|
||||
## Output Explanation
|
||||
|
||||
### No Changes
|
||||
```
|
||||
━━━ c1 ━━━
|
||||
Building current... done
|
||||
Building HEAD... done
|
||||
✓ No changes
|
||||
```
|
||||
This host's configuration is identical between current and HEAD.
|
||||
|
||||
### Changes Detected
|
||||
```
|
||||
━━━ stinky ━━━
|
||||
Building current... done
|
||||
Building HEAD... done
|
||||
⚠ Configuration changed
|
||||
|
||||
<<< /nix/store/abc-nixos-system-stinky-25.05 (HEAD)
|
||||
>>> /nix/store/xyz-nixos-system-stinky-25.05 (current)
|
||||
|
||||
Version changes:
|
||||
[C] octoprint: 1.9.3 -> 1.10.0
|
||||
[A+] libcamera: ∅ -> 0.1.0
|
||||
Closure size: 1500 -> 1520 (5 paths added, 2 paths removed, +3, +15.2 MB)
|
||||
```
|
||||
|
||||
Legend:
|
||||
- `[C]` - Changed package version
|
||||
- `[A+]` - Added package
|
||||
- `[R-]` - Removed package
|
||||
- `[U.]` - Updated (same version, rebuilt)
|
||||
|
||||
### Verbose Mode (--verbose)
|
||||
|
||||
With `-v` or `--verbose`, also shows the actual store paths that changed:
|
||||
|
||||
```
|
||||
━━━ c1 ━━━
|
||||
Building current... done
|
||||
Building HEAD... done
|
||||
⚠ Configuration changed
|
||||
|
||||
[nvd summary as above]
|
||||
|
||||
Changed store paths:
|
||||
Removed (17 paths):
|
||||
- config.fish
|
||||
- system-units
|
||||
- home-manager-generation
|
||||
- etc-fuse.conf
|
||||
... and 13 more
|
||||
|
||||
Added (17 paths):
|
||||
- config.fish
|
||||
- system-units
|
||||
- home-manager-generation
|
||||
- etc-fuse.conf
|
||||
... and 13 more
|
||||
```
|
||||
|
||||
This is useful when nvd shows "No version changes" but paths still changed (e.g., refactors that rebuild config files).
|
||||
|
||||
### Deep Mode (--deep)
|
||||
|
||||
With `-d` or `--deep`, shows actual content diffs of changed files within store paths (implies verbose):
|
||||
|
||||
```
|
||||
━━━ c1 ━━━
|
||||
Building current... done
|
||||
Building HEAD... done
|
||||
⚠ Configuration changed
|
||||
|
||||
[nvd summary and path listing as above]
|
||||
|
||||
Content diffs of changed files:
|
||||
|
||||
▸ etc-fuse.conf
|
||||
@@ -1,2 +1,2 @@
|
||||
-user_allow_other
|
||||
+#user_allow_other
|
||||
mount_max = 1000
|
||||
|
||||
▸ nixos-system-c1-25.05
|
||||
activate:
|
||||
@@ -108,7 +108,7 @@
|
||||
echo "setting up /etc..."
|
||||
-/nix/store/...-perl/bin/perl /nix/store/...-setup-etc.pl /nix/store/abc-etc/etc
|
||||
+/nix/store/...-perl/bin/perl /nix/store/...-setup-etc.pl /nix/store/xyz-etc/etc
|
||||
|
||||
▸ unit-dbus.service
|
||||
dbus.service:
|
||||
@@ -1,5 +1,5 @@
|
||||
[Service]
|
||||
+Environment="LD_LIBRARY_PATH=/nix/store/.../systemd/lib"
|
||||
Environment="LOCALE_ARCHIVE=..."
|
||||
```
|
||||
|
||||
**What it shows**:
|
||||
- Matches changed paths by basename (e.g., both have "config.fish")
|
||||
- Diffs important files: activate scripts, etc/*, *.conf, *.fish, *.service, *.nix
|
||||
- Shows unified diff format (lines added/removed)
|
||||
- Limits to first 50 lines per file
|
||||
|
||||
**When to use**:
|
||||
- When you need to know **what exactly changed** in config files
|
||||
- Debugging unexpected configuration changes
|
||||
- Reviewing refactors that don't change package versions
|
||||
- Understanding why a host rebuilt despite "No version changes"
|
||||
|
||||
### Build Failures
|
||||
```
|
||||
━━━ broken-host ━━━
|
||||
Building current... FAILED
|
||||
Error: attribute 'foo' missing
|
||||
```
|
||||
If a host fails to build, the error is shown and the script continues with other hosts.
|
||||
|
||||
## How It Works
|
||||
|
||||
1. **Discovers hosts**: Queries `deploy.nodes` from flake to get all configured hosts
|
||||
2. **Creates worktree**: Uses `git worktree` to check out HEAD in a temporary directory
|
||||
3. **Builds configurations**: Builds `config.system.build.toplevel` for each host in both locations
|
||||
4. **Compares with nvd**: Runs `nvd diff` to show package-level changes
|
||||
5. **Cleans up**: Removes temporary worktree automatically
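
Stripped of output formatting and error handling, the comparison for a single host boils down to roughly this sketch (the real script also discovers hosts from `deploy.nodes` and implements `--verbose`/`--deep`):

```bash
host=c1
worktree="$(mktemp -d)/head"

# Check out HEAD in a temporary worktree
git worktree add "$worktree" HEAD

# Build the same host from the working tree and from HEAD
nix build ".#nixosConfigurations.${host}.config.system.build.toplevel" -o result-current
nix build "$worktree#nixosConfigurations.${host}.config.system.build.toplevel" -o result-head

# Package-level diff between the two system closures
nvd diff ./result-head ./result-current

git worktree remove "$worktree"
```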
|
||||
|
||||
## Important Notes
|
||||
|
||||
### Git Staging Required
|
||||
|
||||
Flakes only evaluate files that are tracked by git. To make changes visible:
|
||||
```bash
|
||||
# Stage new files
|
||||
git add new-file.nix
|
||||
|
||||
# Stage changes to existing files
|
||||
git add modified-file.nix
|
||||
|
||||
# Or stage everything
|
||||
git add .
|
||||
```
|
||||
|
||||
Unstaged changes to tracked files **are** visible (flake uses working tree content).
|
||||
|
||||
### Performance
|
||||
|
||||
- First run may be slow (building all configurations)
|
||||
- Subsequent runs benefit from Nix evaluation cache
|
||||
- Typical runtime: 1-5 minutes depending on changes
|
||||
- **ARM cross-compilation is slow**: Use host filtering to avoid building ARM hosts when not needed
|
||||
- Example: `./scripts/diff-configs.sh c1 c2 c3` (x86_64 only, fast)
|
||||
- vs `./scripts/diff-configs.sh` (includes stinky/alo-cloud-1, slow)
|
||||
|
||||
### When to Use
|
||||
|
||||
**Good use cases**:
|
||||
- Refactoring shared modules (like impermanence)
|
||||
- Updating common configurations
|
||||
- Before committing significant changes
|
||||
- Verifying deploy target consistency
|
||||
|
||||
**Not needed for**:
|
||||
- Adding a single new host
|
||||
- Trivial one-host changes
|
||||
- Documentation updates
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Not in a git repository"
|
||||
```bash
|
||||
cd /home/ppetru/projects/alo-cluster
|
||||
./scripts/diff-configs.sh
|
||||
```
|
||||
|
||||
### "No changes detected"
|
||||
All changes are already committed. Stage some changes first:
|
||||
```bash
|
||||
git add .
|
||||
```
|
||||
|
||||
### Build failures for all hosts
|
||||
Check flake syntax:
|
||||
```bash
|
||||
nix flake check
|
||||
```
|
||||
|
||||
### nvd not found
|
||||
Install nvd:
|
||||
```bash
|
||||
nix profile install nixpkgs#nvd
|
||||
```
|
||||
(Already included in workstation-node.nix packages)
|
||||
|
||||
## Related Tools
|
||||
|
||||
- `nvd` - Package diff tool (used internally)
|
||||
- `nix diff-closures` - Low-level closure diff
|
||||
- `nix store diff-closures` - Alternative diff command
|
||||
- `deploy-rs` - Actual deployment tool
|
||||
|
||||
## See Also
|
||||
|
||||
- `common/global/show-changelog.nix` - Shows changes during system activation
|
||||
- `docs/RASPBERRY_PI_SD_IMAGE.md` - SD image building process
|
||||
354 docs/HOMELAB_AGENT.md Normal file
@@ -0,0 +1,354 @@
|
||||
# ABOUTME: Vision and design document for an AI agent that manages the homelab cluster.
|
||||
# ABOUTME: Covers emergent capabilities, technical approach, and implementation strategy.
|
||||
|
||||
# Homelab Agent: Vision and Design
|
||||
|
||||
## The Core Idea
|
||||
|
||||
Not automation. Not "LLM-powered autocomplete for infrastructure." Emergent capabilities.
|
||||
|
||||
The same shift Claude Code brought to programming: you describe outcomes, it handles implementation. You become a "product manager" for your infrastructure instead of an "infrastructure engineer."
|
||||
|
||||
The cluster stops being infrastructure you manage and becomes an environment that responds to intent.
|
||||
|
||||
## What Makes This Different From Automation
|
||||
|
||||
**Automation**: "If disk > 90%, delete old logs"
|
||||
|
||||
**Emergent**: "Disk is 95% full. What's using space? ...Postgres WAL. Can I safely checkpoint? Last backup was 2h ago, load is low, yes. Running checkpoint... down to 60%. I should note that WAL retention might need tuning."
|
||||
|
||||
The difference:
|
||||
- Novel problem-solving (not pattern matching)
|
||||
- Contextual safety reasoning
|
||||
- Adaptation to the specific situation
|
||||
- Learning for the future
|
||||
|
||||
## Examples of Genuinely New Capabilities
|
||||
|
||||
### 1. Intent-Driven Infrastructure
|
||||
|
||||
> "I want to run Synapse for Matrix"
|
||||
|
||||
Agent figures out: Nomad job spec, storage location, Traefik routing, TLS, Consul registration, backup config. Creates it, deploys it, validates it.
|
||||
|
||||
You don't need to know Nomad job format or Traefik labels. You describe the outcome.
|
||||
|
||||
### 2. Proactive Evolution (The Best One)
|
||||
|
||||
The agent doesn't wait for problems or instructions:
|
||||
|
||||
- "Synapse 1.98 has a security fix. I've tested it in a local build, no config changes needed. Deploy?"
|
||||
- "Your NFS server has been primary for 47 days. Want me to test failover to make sure it still works?"
|
||||
- "I noticed arr services all have the same resource limits but Sonarr consistently uses more. Adjusted."
|
||||
- "There's a new NixOS module for Traefik that simplifies your current setup. Here's the diff."
|
||||
|
||||
Not monitoring. Stewardship.
|
||||
|
||||
### 3. The Cluster Has Opinions
|
||||
|
||||
> You: "I want to add Plex"
|
||||
>
|
||||
> Agent: "You already have Jellyfin, which does the same thing. If you want Plex specifically for its mobile app, I can set it up to share Jellyfin's media library. Or if you want to switch entirely, I can migrate watch history. What's the actual goal?"
|
||||
|
||||
Not a command executor. A collaborator that understands your system.
|
||||
|
||||
### 4. "Bring This Into the Cluster"
|
||||
|
||||
You're running something in Docker on a random VM:
|
||||
|
||||
> "Bring this into the cluster"
|
||||
|
||||
Agent: connects, inspects, figures out dependencies, writes Nomad job, sets up storage, migrates data, routes traffic, validates, decommissions old instance.
|
||||
|
||||
You didn't need to know how.
|
||||
|
||||
### 5. Cross-Cutting Changes
|
||||
|
||||
> "Add authentication to all public-facing services"
|
||||
|
||||
Agent identifies which services are public, understands the auth setup (Pocket ID + traefik-oidc-auth), modifies each service's config, tests that auth works.
|
||||
|
||||
Single coherent change across everything, without knowing every service yourself.
|
||||
|
||||
### 6. Emergent Debugging
|
||||
|
||||
Not runbooks. Actual reasoning:
|
||||
|
||||
> "The blog is slow"
|
||||
|
||||
Agent checks service health (fine), node resources (fine), network latency (fine), database queries (ah, slow query), traces to missing index, adds index, validates performance improved.
|
||||
|
||||
Solved a problem nobody wrote a runbook for.
|
||||
|
||||
### 7. Architecture Exploration
|
||||
|
||||
> "What if we added a third Nomad server for better quorum?"
|
||||
|
||||
Agent reasons about current topology, generates the config, identifies what would change, shows blast radius. Thinking partner for infrastructure decisions.
|
||||
|
||||
## Why Nix Makes This Possible
|
||||
|
||||
Traditional infrastructure: state is scattered and implicit. Nix: everything is declared.
|
||||
|
||||
- **Full system understanding** - agent can read the flake and understand EVERYTHING
|
||||
- **Safe experimentation** - build without deploying, rollback trivially
|
||||
- **Reproducibility** - "what was the state 3 days ago?" can be rebuilt exactly
|
||||
- **Composition** - agent can generate valid configs that compose correctly
|
||||
- **The ecosystem** - 80k+ packages, thousands of modules the agent can navigate
|
||||
|
||||
> "I want a VPN that works with my phone"
|
||||
|
||||
Agent knows Nix, finds WireGuard module, configures it, generates QR codes, opens firewall. You didn't learn WireGuard.
|
||||
|
||||
## The Validation Pattern
|
||||
|
||||
Just like code has linting and tests, infrastructure actions need validation:
|
||||
|
||||
| Phase | Code | Infrastructure |
|
||||
|-------|------|----------------|
|
||||
| Static | Lint, typecheck | Config parses, secrets exist, no port conflicts |
|
||||
| Pre-flight | — | Cluster healthy, dependencies up, quorum intact |
|
||||
| Post-action | Unit tests | Service started, health checks pass, metrics flowing |
|
||||
| Invariants | CI | NFS mounted, Consul quorum, replication current |
|
||||
|
||||
The agent can take actions confidently because it validates outcomes.
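
As a rough sketch of the post-action phase (the job name and health URL are placeholders, not part of the current setup), a validation after redeploying a service could look like:

```bash
job=myservice

# Nomad sees the job and its allocations as running
nomad job status "$job"

# Consul health checks for the service are passing
curl -s "http://127.0.0.1:8500/v1/health/checks/$job" | grep -o '"Status":"[a-z]*"'

# Application-level health endpoint responds
curl -fsS "https://$job.v.paler.net/healthz" >/dev/null && echo healthy
```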
|
||||
|
||||
## The Reality Check
|
||||
|
||||
Some of this works today. Some would fail spectacularly. Some would fail silently and idiotically. Just like Claude Code for coding.
|
||||
|
||||
Therefore:
|
||||
- Tight loop with the human operator
|
||||
- Assume the human is competent and knowledgeable
|
||||
- Agent amplifies expertise, doesn't replace it
|
||||
- Escalate when uncertain
|
||||
|
||||
## Technical Approach
|
||||
|
||||
### Runtime: Claude Code (Not Agent SDK)
|
||||
|
||||
Two options were considered:
|
||||
|
||||
| Tool | Pro/Max Subscription | API Billing |
|
||||
|------|---------------------|-------------|
|
||||
| Claude Code CLI | Yes | Yes |
|
||||
| Claude Agent SDK | No | Required |
|
||||
|
||||
Claude Code can use existing Max subscription. Agent SDK requires separate API billing.
|
||||
|
||||
For v1, use Claude Code as the runtime:
|
||||
|
||||
```bash
|
||||
claude --print "prompt" \
|
||||
--allowedTools "Bash,Read,Edit" \
|
||||
--permission-mode acceptEdits
|
||||
```
|
||||
|
||||
Graduate to Agent SDK later if limitations are hit.
|
||||
|
||||
### Trigger Architecture
|
||||
|
||||
On-demand Claude Code sessions, triggered by:
|
||||
- **Timer** - periodic health/sanity check
|
||||
- **Alert** - alertmanager webhook
|
||||
- **Event** - systemd OnFailure, consul watch
|
||||
- **Manual** - invoke with a goal
|
||||
|
||||
Each trigger provides context and a goal. Claude Code does the rest.
|
||||
|
||||
### Structure
|
||||
|
||||
```
|
||||
agent/
|
||||
├── triggers/
|
||||
│ ├── scheduled-check # systemd timer
|
||||
│ ├── on-alert # webhook handler
|
||||
│ └── on-failure # systemd OnFailure target
|
||||
├── gather-context.sh # snapshot of cluster state
|
||||
└── goals/
|
||||
├── health-check.md # verify health, fix if safe
|
||||
├── incident.md # investigate alert, fix or escalate
|
||||
└── proactive.md # look for improvements
|
||||
```
|
||||
|
||||
### Example: Scheduled Health Check
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
CONTEXT=$(./gather-context.sh)
|
||||
GOAL=$(cat goals/health-check.md)
|
||||
|
||||
claude --print "
|
||||
## Context
|
||||
$CONTEXT
|
||||
|
||||
## Goal
|
||||
$GOAL
|
||||
|
||||
## Constraints
|
||||
- You can read any file in this repo
|
||||
- You can run nomad/consul/systemctl commands
|
||||
- You can edit Nix/HCL files and run deploy
|
||||
- Before destructive actions, validate with nix build or nomad plan
|
||||
- If uncertain about safety, output a summary and stop
|
||||
"
|
||||
```
|
||||
|
||||
### Context Gathering
|
||||
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
echo "=== Nomad Jobs ==="
|
||||
nomad job status
|
||||
|
||||
echo "=== Consul Members ==="
|
||||
consul members
|
||||
|
||||
echo "=== Failed Systemd Units ==="
|
||||
systemctl --failed
|
||||
|
||||
echo "=== Recent Errors (last hour) ==="
|
||||
journalctl --since "1 hour ago" -p err --no-pager | tail -100
|
||||
```
|
||||
|
||||
## Edge Cases and the Nix Promise
|
||||
|
||||
The NixOS promise mostly works, but sometimes doesn't:
|
||||
- Mount option changes that require reboot
|
||||
- Transition states where switch fails even if end state is correct
|
||||
- Partial application where switch "succeeds" but change didn't take effect
|
||||
|
||||
This is where the agent adds value: it can detect when a change needs special handling, apply the appropriate strategy, and verify the change actually took effect.
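
For example, checking that a mount option change actually took effect is a single command the agent can run after a switch (path illustrative):

```bash
# Compare live mount options against what the new config declares;
# if they still differ after `switch`, a remount or reboot is needed.
findmnt -no OPTIONS /data/services
```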
|
||||
|
||||
## Capturing Knowledge
|
||||
|
||||
Document edge cases as they're discovered:
|
||||
|
||||
```markdown
|
||||
## CIFS/NFS mount option changes
|
||||
Switch may fail or succeed without effect. Strategy:
|
||||
1. Try normal deploy
|
||||
2. If mount options don't match after, reboot required
|
||||
3. If deploy fails with mount busy, local switch + reboot
|
||||
```
|
||||
|
||||
The agent reads this, uses it as context, but can also reason about novel situations.
|
||||
|
||||
## Path to CI/CD
|
||||
|
||||
Eventually: push to main triggers deploy via agent.
|
||||
|
||||
```
|
||||
push to main
|
||||
|
|
||||
build all configs (mechanical)
|
||||
|
|
||||
agent: "what changed? is this safe to auto-deploy?"
|
||||
|
|
||||
├─ clean change -> deploy, validate, done
|
||||
├─ needs reboot -> deploy, schedule reboot, validate after
|
||||
├─ risky change -> notify for manual approval
|
||||
└─ failed -> diagnose, retry with different strategy, or escalate
|
||||
|
|
||||
post-deploy verification
|
||||
|
|
||||
notification
|
||||
```
|
||||
|
||||
The agent is the intelligence layer on top of mechanical CI/CD.
|
||||
|
||||
## Research: What Others Are Doing (January 2026)
|
||||
|
||||
### Existing Projects & Approaches
|
||||
|
||||
**n8n + Ollama Stack**
|
||||
The most common pattern is n8n (workflow orchestration) + Ollama (local LLM). Webhooks from
|
||||
monitoring (Netdata/Prometheus) trigger AI-assisted diagnosis. Philosophy from one practitioner:
|
||||
"train an employee, not a bot" — build trust, gradually grant autonomy.
|
||||
|
||||
Sources:
|
||||
- [Virtualization Howto: Self-Healing Home Lab](https://www.virtualizationhowto.com/2025/10/how-i-built-a-self-healing-home-lab-that-fixes-itself/)
|
||||
- [addROM: AI Agent for Homelab with n8n](https://addrom.com/unleashing-the-power-of-an-ai-agent-for-homelab-management-with-n8n/)
|
||||
|
||||
**Local Infrastructure Agent (Kelcode)**
|
||||
Architecture: user question → tool router → query processor → LLM response. Connects to
|
||||
Kubernetes, Prometheus, Harbor Registry.
|
||||
|
||||
Key insight: "The AI's output definition must be perfectly synchronized with the software
|
||||
it's trying to use." Their K8s tool failed because the prompt generated kubectl commands
|
||||
while the code expected structured data objects.
|
||||
|
||||
Uses phi4-mini via Ollama for routing decisions after testing multiple models.
|
||||
|
||||
Source: [Kelcode: Building a Homelab Agentic Ecosystem](https://kelcode.co.uk/building-a-homelab-agentic-ecosystem-part1/)
|
||||
|
||||
**nixai**
|
||||
AI assistant specifically for NixOS. Searches NixOS Wiki, Nixpkgs Manual, nix.dev, Home Manager
|
||||
docs. Diagnoses issues from piped logs/errors. Privacy-first: defaults to local Ollama.
|
||||
|
||||
Limited scope — helper tool, not autonomous agent. But shows NixOS-specific tooling is possible.
|
||||
|
||||
Source: [NixOS Discourse: Introducing nixai](https://discourse.nixos.org/t/introducing-nixai-your-ai-powered-nixos-companion/65168)
|
||||
|
||||
**AI-Friendly Infrastructure (The Merino Wolf)**
|
||||
Key insight: make infrastructure "AI-friendly" through structured documentation. CLAUDE.md
|
||||
provides comprehensive context — "structured knowledge transfer."
|
||||
|
||||
Lessons:
|
||||
- "Context investment pays dividends" — comprehensive documentation is the most valuable asset
|
||||
- Layered infrastructure design mirrors how both humans and AI think
|
||||
- Rule-based guidance enforces safety practices automatically
|
||||
|
||||
Source: [The Merino Wolf: AI-Powered Homelab](https://themerinowolf.com/posts/ai-powered-homelab/)
|
||||
|
||||
**Claude Code Infrastructure Patterns**
|
||||
Solves "skills don't activate automatically" problem using hooks (UserPromptSubmit, PostToolUse)
|
||||
+ skill-rules.json for auto-activation.
|
||||
|
||||
500-line rule with progressive disclosure: main file for high-level guidance, resource files
|
||||
for deep dives. Claude loads materials incrementally as needed.
|
||||
|
||||
Persistence pattern across context resets using three-file structures (plan, context, tasks).
|
||||
|
||||
Born from 6 months managing TypeScript microservices (50k+ lines).
|
||||
|
||||
Source: [diet103/claude-code-infrastructure-showcase](https://github.com/diet103/claude-code-infrastructure-showcase)
|
||||
|
||||
### Patterns That Work
|
||||
|
||||
- Local LLMs (Ollama) + workflow orchestration (n8n) is the popular stack
|
||||
- Start with read-only/diagnostic agents, gradually add write access
|
||||
- Pre-approved command lists for safety (e.g., 50 validated bash commands max)
|
||||
- Structured documentation as foundation — AI is only as good as its context
|
||||
- Multi-step tool use: agent plans, then executes steps, observing results
|
||||
|
||||
### What's Missing in the Space
|
||||
|
||||
- Nobody's doing true "emergent capabilities" yet — mostly tool routing
|
||||
- Most projects are Kubernetes/Docker focused, not NixOS
|
||||
- Few examples of proactive stewardship (our example #2)
|
||||
- Limited examples of agents that understand the whole system coherently
|
||||
|
||||
### Community Skepticism
|
||||
|
||||
From Reddit discussions: there is real skepticism about using LLM agents in production. LLMs can
automate specific tasks, but they still frequently need a human for complex decision-making.
|
||||
|
||||
This validates our approach: tight loop with a competent human, not autonomous operation.
|
||||
|
||||
### The Gap We'd Fill
|
||||
|
||||
- NixOS-native agent leveraging declarative config as source of truth
|
||||
- True emergence — not just tool routing, but reasoning about novel situations
|
||||
- Proactive evolution, not just reactive troubleshooting
|
||||
- Tight human loop with a competent operator
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Build trigger infrastructure (systemd timer, basic webhook handler)
|
||||
2. Write context gathering scripts
|
||||
3. Define goal prompts for common scenarios
|
||||
4. Test with scheduled health checks
|
||||
5. Iterate based on what works and what doesn't
|
||||
6. Document edge cases as they're discovered
|
||||
7. Gradually expand scope as confidence grows
|
||||
@@ -37,17 +37,17 @@ See [CLUSTER_REVAMP.md](./CLUSTER_REVAMP.md) for detailed procedures.
|
||||
## Phase 3: Migrate from GlusterFS to NFS
|
||||
- [x] Update all nodes to mount NFS at `/data/services`
|
||||
- [x] Deploy updated configs (NFS client on all nodes)
|
||||
- [ ] Stop all Nomad jobs temporarily
|
||||
- [ ] Copy data from GlusterFS to zippy NFS
|
||||
- [ ] Copy `/data/compute/appdata/*` → `/persist/services/appdata/`
|
||||
- [ ] Copy `/data/compute/config/*` → `/persist/services/config/`
|
||||
- [ ] Copy `/data/sync/wordpress` → `/persist/services/appdata/wordpress`
|
||||
- [ ] Verify data integrity
|
||||
- [ ] Verify NFS mounts working on all nodes
|
||||
- [ ] Stop GlusterFS volume
|
||||
- [ ] Delete GlusterFS volume
|
||||
- [ ] Remove GlusterFS from NixOS configs
|
||||
- [ ] Remove syncthing wordpress sync configuration
|
||||
- [x] Stop all Nomad jobs temporarily
|
||||
- [x] Copy data from GlusterFS to zippy NFS
|
||||
- [x] Copy `/data/compute/appdata/*` → `/persist/services/appdata/`
|
||||
- [x] Copy `/data/compute/config/*` → `/persist/services/config/`
|
||||
- [x] Copy `/data/sync/wordpress` → `/persist/services/appdata/wordpress`
|
||||
- [x] Verify data integrity
|
||||
- [x] Verify NFS mounts working on all nodes
|
||||
- [x] Stop GlusterFS volume
|
||||
- [x] Delete GlusterFS volume
|
||||
- [x] Remove GlusterFS from NixOS configs
|
||||
- [x] Remove syncthing wordpress sync configuration (no longer used)
|
||||
|
||||
## Phase 4: Update and redeploy Nomad jobs
|
||||
|
||||
@@ -125,8 +125,8 @@ See [CLUSTER_REVAMP.md](./CLUSTER_REVAMP.md) for detailed procedures.
|
||||
- [ ] Verify backups include `/persist/services` data
|
||||
- [ ] Verify backups exclude replication snapshots
|
||||
- [ ] Update documentation (README.md, architecture diagrams)
|
||||
- [ ] Clean up old GlusterFS data (only after everything verified!)
|
||||
- [ ] Remove old glusterfs directories from all nodes
|
||||
- [x] Clean up old GlusterFS data (only after everything verified!)
|
||||
- [x] Remove old glusterfs directories from all nodes
|
||||
|
||||
## Post-Migration Checklist
|
||||
- [ ] All 5 servers in quorum (consul members)
|
||||
@@ -143,8 +143,8 @@ See [CLUSTER_REVAMP.md](./CLUSTER_REVAMP.md) for detailed procedures.
|
||||
|
||||
---
|
||||
|
||||
**Last updated**: 2025-10-23 22:30
|
||||
**Current phase**: Phase 4 complete! All services migrated to NFS
|
||||
**Last updated**: 2025-10-25
|
||||
**Current phase**: Phase 3 & 4 complete! GlusterFS removed, all services on NFS
|
||||
**Note**: Phase 1 (fractal NixOS conversion) deferred until after GlusterFS migration is complete
|
||||
|
||||
## Migration Summary
|
||||
|
||||
98 docs/RASPBERRY_PI_SD_IMAGE.md Normal file
@@ -0,0 +1,98 @@
|
||||
# Raspberry Pi SD Image Building and Deployment
|
||||
|
||||
Guide for building and deploying NixOS SD card images for Raspberry Pi hosts (e.g., stinky).
|
||||
|
||||
## Overview
|
||||
|
||||
Raspberry Pi hosts use a different deployment strategy than regular NixOS hosts:
|
||||
- **First deployment**: Build and flash an SD card image
|
||||
- **Subsequent updates**: Use `deploy-rs` like other hosts
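
Once the Pi has booted from the SD image and is reachable over SSH, later updates use the normal deploy-rs flow (assuming the host is defined under `deploy.nodes` in the flake):

```bash
# Subsequent updates, same as any other host
deploy .#stinky
```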
|
||||
|
||||
## Architecture
|
||||
|
||||
### Storage Layout
|
||||
|
||||
**Partition structure** (automatically created by NixOS):
|
||||
- `/boot/firmware` - FAT32 partition (label: `FIRMWARE`)
|
||||
- Contains Raspberry Pi firmware, U-Boot bootloader, device trees
|
||||
- `/` - tmpfs (in-memory, ephemeral root)
|
||||
- 2GB RAM disk, wiped on every boot
|
||||
- `/nix` - ext4 partition (label: `NIXOS_SD`)
|
||||
- Nix store and persistent data
|
||||
- Contains `/nix/persist` directory for impermanence
|
||||
|
||||
### Impermanence with tmpfs
|
||||
|
||||
Unlike btrfs-based hosts that use `/persist`, Pi hosts use `/nix/persist`:
|
||||
- Root filesystem is tmpfs (no disk writes, auto-wiped)
|
||||
- Single ext4 partition mounted at `/nix`
|
||||
- Persistent data stored in `/nix/persist/` (directory, not separate mount)
|
||||
- Better for SD card longevity (fewer writes)
|
||||
|
||||
**Persisted paths**:
|
||||
- `/nix/persist/var/lib/nixos` - System state
|
||||
- `/nix/persist/home/ppetru` - User home directory
|
||||
- `/nix/persist/etc` - SSH host keys, machine-id
|
||||
- Service-specific: `/nix/persist/var/lib/octoprint`, etc.
|
||||
|
||||
## Building the SD Image
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- ARM64 emulation enabled on build machine:
|
||||
```nix
|
||||
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
|
||||
```
|
||||
(Already configured in `workstation-node.nix`)
|
||||
|
||||
### Build Command
|
||||
|
||||
```bash
|
||||
# Build SD image for stinky
|
||||
nix build .#packages.aarch64-linux.stinky-sdImage
|
||||
|
||||
# Result location
|
||||
ls -lh result/sd-image/
|
||||
# nixos-sd-image-stinky-25.05-*.img.zst (compressed with zstd)
|
||||
```
|
||||
|
||||
**Build location**: Defined in `flake.nix`:
|
||||
```nix
|
||||
packages.aarch64-linux.stinky-sdImage =
|
||||
self.nixosConfigurations.stinky.config.system.build.sdImage;
|
||||
```
|
||||
|
||||
## Flashing the SD Card
|
||||
|
||||
### Find SD Card Device
|
||||
|
||||
```bash
|
||||
# Before inserting SD card
|
||||
lsblk
|
||||
|
||||
# Insert SD card, then check again
|
||||
lsblk
|
||||
|
||||
# Look for new device, typically:
|
||||
# - /dev/sdX (USB SD card readers)
|
||||
# - /dev/mmcblk0 (built-in SD card slots)
|
||||
```
|
||||
|
||||
**Warning**: Double-check the device! Wrong device = data loss.
|
||||
|
||||
### Flash Image
|
||||
|
||||
```bash
|
||||
# Decompress and flash in one command
|
||||
zstd -d -c result/sd-image/*.img.zst | sudo dd of=/dev/sdX bs=4M status=progress conv=fsync
|
||||
|
||||
# Or decompress first, then flash
|
||||
unzstd result/sd-image/*.img.zst
|
||||
sudo dd if=result/sd-image/*.img of=/dev/sdX bs=4M status=progress conv=fsync
|
||||
```
|
||||
|
||||
### Eject SD Card
|
||||
|
||||
```bash
|
||||
sudo eject /dev/sdX
|
||||
```
|
||||
@@ -1,3 +1,7 @@
|
||||
* remote docker images used, can't come up if internet is down
|
||||
* local docker images pulled from gitea, can't come up if gitea isn't up (yet)
|
||||
* traefik-oidc-auth plugin downloaded from GitHub at startup (cached in /data/services/traefik/plugins-storage)
|
||||
* renovate system of some kind
|
||||
* vector (or other log ingestion) everywhere, consider moving it off docker if possible
|
||||
* monitor backup-persist success/fail
|
||||
* gitea organization is public -> at least from the internal network, anyone can pull images and probably also clone repos. There should be absolutely zero secrets in the repos (the ones that are there now should be rotated before being stored anywhere else), and the nomad workers should authenticate to pull images
|
||||
|
||||
312 flake.lock generated
@@ -1,5 +1,21 @@
|
||||
{
|
||||
"nodes": {
|
||||
"base16-schemes": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1696158499,
|
||||
"narHash": "sha256-5yIHgDTPjoX/3oDEfLSQ0eJZdFL1SaCfb9d6M0RmOTM=",
|
||||
"owner": "tinted-theming",
|
||||
"repo": "base16-schemes",
|
||||
"rev": "a9112eaae86d9dd8ee6bb9445b664fba2f94037a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "tinted-theming",
|
||||
"repo": "base16-schemes",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"browser-previews": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
@@ -9,11 +25,11 @@
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1761247699,
|
||||
"narHash": "sha256-yMQCRsD6F6eyt0ckCbAHH3W59mav7rbn9hwfUWd+rHU=",
|
||||
"lastModified": 1769196967,
|
||||
"narHash": "sha256-js2jXLzaZbXNFkYTszQntIS8QUJYJumSFK+3bR5nhlo=",
|
||||
"owner": "nix-community",
|
||||
"repo": "browser-previews",
|
||||
"rev": "5fc2e2d88f87b46b72767fd6fc2d4af7d983f2c7",
|
||||
"rev": "edc3b1c0455abc74bfe2d6e029abe5fc778b0d62",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -31,11 +47,11 @@
|
||||
"utils": "utils"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1756719547,
|
||||
"narHash": "sha256-N9gBKUmjwRKPxAafXEk1EGadfk2qDZPBQp4vXWPHINQ=",
|
||||
"lastModified": 1766051518,
|
||||
"narHash": "sha256-znKOwPXQnt3o7lDb3hdf19oDo0BLP4MfBOYiWkEHoik=",
|
||||
"owner": "serokell",
|
||||
"repo": "deploy-rs",
|
||||
"rev": "125ae9e3ecf62fb2c0fd4f2d894eb971f1ecaed2",
|
||||
"rev": "d5eff7f948535b9c723d60cd8239f8f11ddc90fa",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -52,11 +68,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1741473158,
|
||||
"narHash": "sha256-kWNaq6wQUbUMlPgw8Y+9/9wP0F8SHkjy24/mN3UAppg=",
|
||||
"lastModified": 1768818222,
|
||||
"narHash": "sha256-460jc0+CZfyaO8+w8JNtlClB2n4ui1RbHfPTLkpwhU8=",
|
||||
"owner": "numtide",
|
||||
"repo": "devshell",
|
||||
"rev": "7c9e793ebe66bcba8292989a68c0419b737a22a0",
|
||||
"rev": "255a2b1725a20d060f566e4755dbf571bbbb5f76",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -72,11 +88,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760701190,
|
||||
"narHash": "sha256-y7UhnWlER8r776JsySqsbTUh2Txf7K30smfHlqdaIQw=",
|
||||
"lastModified": 1768923567,
|
||||
"narHash": "sha256-GVJ0jKsyXLuBzRMXCDY6D5J8wVdwP1DuQmmvYL/Vw/Q=",
|
||||
"owner": "nix-community",
|
||||
"repo": "disko",
|
||||
"rev": "3a9450b26e69dcb6f8de6e2b07b3fc1c288d85f5",
|
||||
"rev": "00395d188e3594a1507f214a2f15d4ce5c07cb28",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -88,7 +104,6 @@
|
||||
"ethereum-nix": {
|
||||
"inputs": {
|
||||
"devshell": "devshell",
|
||||
"flake-compat": "flake-compat_2",
|
||||
"flake-parts": "flake-parts",
|
||||
"flake-utils": "flake-utils_2",
|
||||
"foundry-nix": "foundry-nix",
|
||||
@@ -100,11 +115,11 @@
|
||||
"treefmt-nix": "treefmt-nix"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1761217923,
|
||||
"narHash": "sha256-usNQQSwmaHdHiRttmH4no/CVTUyEP+sIoAkkRMgdu0g=",
|
||||
"lastModified": 1769298686,
|
||||
"narHash": "sha256-ZwsxXeLyrb5VinFsdjrjt/J7Tp5O2A9yy7lxWaw/h78=",
|
||||
"owner": "nix-community",
|
||||
"repo": "ethereum.nix",
|
||||
"rev": "8c3827adc7e1ea75b43ad3d7c4f9ab9acc3b6273",
|
||||
"rev": "d52663e0592ced611098f80224b45e57d7223453",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -129,31 +144,16 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat_2": {
|
||||
"locked": {
|
||||
"lastModified": 1746162366,
|
||||
"narHash": "sha256-5SSSZ/oQkwfcAz/o/6TlejlVGqeK08wyREBQ5qFFPhM=",
|
||||
"owner": "nix-community",
|
||||
"repo": "flake-compat",
|
||||
"rev": "0f158086a2ecdbb138cd0429410e44994f1b7e4b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-parts": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760813311,
|
||||
"narHash": "sha256-lbHQ7FXGzt6/IygWvJ1lCq+Txcut3xYYd6VIpF1ojkg=",
|
||||
"lastModified": 1768135262,
|
||||
"narHash": "sha256-PVvu7OqHBGWN16zSi6tEmPwwHQ4rLPU9Plvs8/1TUBY=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "4e627ac2e1b8f1de7f5090064242de9a259dbbc8",
|
||||
"rev": "80daad04eddbbf5a4d883996a73f3f542fa437ac",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -170,11 +170,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760948891,
|
||||
"narHash": "sha256-TmWcdiUUaWk8J4lpjzu4gCGxWY6/Ok7mOK4fIFfBuU4=",
|
||||
"lastModified": 1768135262,
|
||||
"narHash": "sha256-PVvu7OqHBGWN16zSi6tEmPwwHQ4rLPU9Plvs8/1TUBY=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "864599284fc7c0ba6357ed89ed5e2cd5040f0c04",
|
||||
"rev": "80daad04eddbbf5a4d883996a73f3f542fa437ac",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -225,24 +225,6 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_3": {
|
||||
"inputs": {
|
||||
"systems": "systems_4"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"foundry-nix": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
@@ -255,11 +237,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1759569036,
|
||||
"narHash": "sha256-FuxbXLDArxD1NeRR8zNnsb8Xww5/+qdMwzN1m8Kow/M=",
|
||||
"lastModified": 1767517855,
|
||||
"narHash": "sha256-LnZosb07bahYAyFw07JFzSXslx9j1dCe+npWDZdPFZg=",
|
||||
"owner": "shazow",
|
||||
"repo": "foundry.nix",
|
||||
"rev": "47ba6d3b02bf3faaa857d3572df82ff186d5279a",
|
||||
"rev": "ee376e8a93f537c2865dda9811e748e4567a7aaf",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -276,27 +258,52 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1758463745,
|
||||
"narHash": "sha256-uhzsV0Q0I9j2y/rfweWeGif5AWe0MGrgZ/3TjpDYdGA=",
|
||||
"lastModified": 1768949235,
|
||||
"narHash": "sha256-TtjKgXyg1lMfh374w5uxutd6Vx2P/hU81aEhTxrO2cg=",
|
||||
"owner": "nix-community",
|
||||
"repo": "home-manager",
|
||||
"rev": "3b955f5f0a942f9f60cdc9cacb7844335d0f21c3",
|
||||
"rev": "75ed713570ca17427119e7e204ab3590cc3bf2a5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"ref": "release-25.11",
|
||||
"repo": "home-manager",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"home-manager_2": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"impermanence",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1768598210,
|
||||
"narHash": "sha256-kkgA32s/f4jaa4UG+2f8C225Qvclxnqs76mf8zvTVPg=",
|
||||
"owner": "nix-community",
|
||||
"repo": "home-manager",
|
||||
"rev": "c47b2cc64a629f8e075de52e4742de688f930dc6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"ref": "release-25.05",
|
||||
"repo": "home-manager",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"impermanence": {
|
||||
"inputs": {
|
||||
"home-manager": "home-manager_2",
|
||||
"nixpkgs": "nixpkgs"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1737831083,
|
||||
"narHash": "sha256-LJggUHbpyeDvNagTUrdhe/pRVp4pnS6wVKALS782gRI=",
|
||||
"lastModified": 1768941735,
|
||||
"narHash": "sha256-OyxsfXNcOkt06/kM+4bnuC8moDx+t7Qr+RB0BBa83Ig=",
|
||||
"owner": "nix-community",
|
||||
"repo": "impermanence",
|
||||
"rev": "4b3e914cdf97a5b536a889e939fb2fd2b043a170",
|
||||
"rev": "69ecf31e8fddc9354a4b418f3a517445d486bb54",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -305,31 +312,22 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"ixx": {
|
||||
"nix-colors": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"nixvim",
|
||||
"nuschtosSearch",
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixvim",
|
||||
"nuschtosSearch",
|
||||
"nixpkgs"
|
||||
]
|
||||
"base16-schemes": "base16-schemes",
|
||||
"nixpkgs-lib": "nixpkgs-lib_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1754860581,
|
||||
"narHash": "sha256-EM0IE63OHxXCOpDHXaTyHIOk2cNvMCGPqLt/IdtVxgk=",
|
||||
"owner": "NuschtOS",
|
||||
"repo": "ixx",
|
||||
"rev": "babfe85a876162c4acc9ab6fb4483df88fa1f281",
|
||||
"lastModified": 1707825078,
|
||||
"narHash": "sha256-hTfge2J2W+42SZ7VHXkf4kjU+qzFqPeC9k66jAUBMHk=",
|
||||
"owner": "misterio77",
|
||||
"repo": "nix-colors",
|
||||
"rev": "b01f024090d2c4fc3152cd0cf12027a7b8453ba1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NuschtOS",
|
||||
"ref": "v0.1.1",
|
||||
"repo": "ixx",
|
||||
"owner": "misterio77",
|
||||
"repo": "nix-colors",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
@@ -340,11 +338,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760846226,
|
||||
"narHash": "sha256-xmU8kAsRprJiTGBTaGrwmjBP3AMA9ltlrxHKFuy5JWc=",
|
||||
"lastModified": 1765267181,
|
||||
"narHash": "sha256-d3NBA9zEtBu2JFMnTBqWj7Tmi7R5OikoU2ycrdhQEws=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-index-database",
|
||||
"rev": "5024e1901239a76b7bf94a4cd27f3507e639d49e",
|
||||
"rev": "82befcf7dc77c909b0f2a09f5da910ec95c5b78f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -353,29 +351,60 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"nixos-hardware": {
|
||||
"locked": {
|
||||
"lastModified": 1761016216,
|
||||
"narHash": "sha256-G/iC4t/9j/52i/nm+0/4ybBmAF4hzR8CNHC75qEhjHo=",
|
||||
"lastModified": 1769302137,
|
||||
"narHash": "sha256-QEDtctEkOsbx8nlFh4yqPEOtr4tif6KTqWwJ37IM2ds=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "481cf557888e05d3128a76f14c76397b7d7cc869",
|
||||
"repo": "nixos-hardware",
|
||||
"rev": "a351494b0e35fd7c0b7a1aae82f0afddf4907aa8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.05",
|
||||
"ref": "master",
|
||||
"repo": "nixos-hardware",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1768564909,
|
||||
"narHash": "sha256-Kell/SpJYVkHWMvnhqJz/8DqQg2b6PguxVWOuadbHCc=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "e4bae1bd10c9c57b2cf517953ab70060a828ee6f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nixos",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1754788789,
|
||||
"narHash": "sha256-x2rJ+Ovzq0sCMpgfgGaaqgBSwY+LST+WbZ6TytnT9Rk=",
|
||||
"lastModified": 1765674936,
|
||||
"narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "a73b9c743612e4244d865a2fdee11865283c04e6",
|
||||
"rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-lib_2": {
|
||||
"locked": {
|
||||
"lastModified": 1697935651,
|
||||
"narHash": "sha256-qOfWjQ2JQSQL15KLh6D7xQhx0qgZlYZTYlcEiRuAMMw=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "e1e11fdbb01113d85c7f41cada9d2847660e3902",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -386,11 +415,11 @@
|
||||
},
|
||||
"nixpkgs-unstable": {
|
||||
"locked": {
|
||||
"lastModified": 1760872779,
|
||||
"narHash": "sha256-c5C907Raf9eY8f1NUXYeju9aUDlm227s/V0OptEbypA=",
|
||||
"lastModified": 1769092226,
|
||||
"narHash": "sha256-6h5sROT/3CTHvzPy9koKBmoCa2eJKh4fzQK8eYFEgl8=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "63bdb5d90fa2fa11c42f9716ad1e23565613b07c",
|
||||
"rev": "b579d443b37c9c5373044201ea77604e37e748c8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -402,11 +431,11 @@
|
||||
},
|
||||
"nixpkgs-unstable_2": {
|
||||
"locked": {
|
||||
"lastModified": 1761114652,
|
||||
"narHash": "sha256-f/QCJM/YhrV/lavyCVz8iU3rlZun6d+dAiC3H+CDle4=",
|
||||
"lastModified": 1769170682,
|
||||
"narHash": "sha256-oMmN1lVQU0F0W2k6OI3bgdzp2YOHWYUAw79qzDSjenU=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "01f116e4df6a15f4ccdffb1bcd41096869fb385c",
|
||||
"rev": "c5296fdd05cfa2c187990dd909864da9658df755",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -416,21 +445,36 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1769089682,
|
||||
"narHash": "sha256-9yA/LIuAVQq0lXelrZPjLuLVuZdm03p8tfmHhnDIkms=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "078d69f03934859a181e81ba987c2bb033eebfc5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixvim": {
|
||||
"inputs": {
|
||||
"flake-parts": "flake-parts_2",
|
||||
"nixpkgs": [
|
||||
"nixpkgs-unstable"
|
||||
],
|
||||
"nuschtosSearch": "nuschtosSearch",
|
||||
"systems": "systems_5"
|
||||
"systems": "systems_4"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1761222236,
|
||||
"narHash": "sha256-Um296vYIWjSjm4btukpjyVPLIz5ovQgoAtEYXFb/Jr4=",
|
||||
"lastModified": 1769247851,
|
||||
"narHash": "sha256-fbsopU0qWfqq1WRKjWYpYCMxmEYyq+Cmw++VXVke5Ns=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixvim",
|
||||
"rev": "39443b9f5737b6f8ee0b654eb47d3a64daac1bd0",
|
||||
"rev": "34a7d94cdcd2b034eb06202992bed1345aa046c9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -439,29 +483,6 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nuschtosSearch": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils_3",
|
||||
"ixx": "ixx",
|
||||
"nixpkgs": [
|
||||
"nixvim",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760652422,
|
||||
"narHash": "sha256-C88Pgz38QIl9JxQceexqL2G7sw9vodHWx1Uaq+NRJrw=",
|
||||
"owner": "NuschtOS",
|
||||
"repo": "search",
|
||||
"rev": "3ebeebe8b6a49dfb11f771f761e0310f7c48d726",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NuschtOS",
|
||||
"repo": "search",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"browser-previews": "browser-previews",
|
||||
@@ -470,8 +491,10 @@
|
||||
"ethereum-nix": "ethereum-nix",
|
||||
"home-manager": "home-manager",
|
||||
"impermanence": "impermanence",
|
||||
"nix-colors": "nix-colors",
|
||||
"nix-index-database": "nix-index-database",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixos-hardware": "nixos-hardware",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"nixpkgs-unstable": "nixpkgs-unstable_2",
|
||||
"nixvim": "nixvim",
|
||||
"sops-nix": "sops-nix"
|
||||
@@ -484,11 +507,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760998189,
|
||||
"narHash": "sha256-ee2e1/AeGL5X8oy/HXsZQvZnae6XfEVdstGopKucYLY=",
|
||||
"lastModified": 1769314333,
|
||||
"narHash": "sha256-+Uvq9h2eGsbhacXpuS7irYO7fFlz514nrhPCSTkASlw=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "5a7d18b5c55642df5c432aadb757140edfeb70b3",
|
||||
"rev": "2eb9eed7ef48908e0f02985919f7eb9d33fa758f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -557,21 +580,6 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_5": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"treefmt-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
@@ -580,11 +588,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760889407,
|
||||
"narHash": "sha256-ppIp04fmz+BaTpJs1nIOmPADg02asfQFrFbhb3SmxsE=",
|
||||
"lastModified": 1768158989,
|
||||
"narHash": "sha256-67vyT1+xClLldnumAzCTBvU0jLZ1YBcf4vANRWP3+Ak=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "3f258dead9fed51f53862366d3a6bc1b622ee7cb",
|
||||
"rev": "e96d59dff5c0d7fddb9d113ba108f03c3ef99eca",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
131 flake.nix
@@ -5,7 +5,7 @@
|
||||
deploy-rs.url = "github:serokell/deploy-rs";
|
||||
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
|
||||
impermanence.url = "github:nix-community/impermanence";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
|
||||
nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
disko.url = "github:nix-community/disko";
|
||||
disko.inputs.nixpkgs.follows = "nixpkgs";
|
||||
@@ -14,7 +14,7 @@
|
||||
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||
};
|
||||
home-manager = {
|
||||
url = "github:nix-community/home-manager/release-25.05";
|
||||
url = "github:nix-community/home-manager/release-25.11";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
nix-index-database = {
|
||||
@@ -33,6 +33,8 @@
|
||||
url = "github:nix-community/browser-previews";
|
||||
inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||
};
|
||||
nix-colors.url = "github:misterio77/nix-colors";
|
||||
nixos-hardware.url = "github:NixOS/nixos-hardware/master";
|
||||
};
|
||||
|
||||
outputs =
|
||||
@@ -47,6 +49,8 @@
|
||||
impermanence,
|
||||
sops-nix,
|
||||
browser-previews,
|
||||
nix-colors,
|
||||
nixos-hardware,
|
||||
...
|
||||
}@inputs:
|
||||
let
|
||||
@@ -54,25 +58,22 @@
|
||||
|
||||
overlay-unstable = final: prev: {
|
||||
unstable = import nixpkgs-unstable {
|
||||
inherit (prev) system;
|
||||
system = prev.stdenv.hostPlatform.system;
|
||||
config.allowUnfree = true;
|
||||
};
|
||||
};
|
||||
|
||||
overlay-browser-previews = final: prev: {
|
||||
browser-previews = browser-previews.packages.${prev.system};
|
||||
browser-previews = browser-previews.packages.${prev.stdenv.hostPlatform.system};
|
||||
};
|
||||
|
||||
overlay-custom = import ./pkgs;
|
||||
|
||||
mkHost =
|
||||
system: profile: modules:
|
||||
let
|
||||
# Auto-import profile-specific module based on profile parameter
|
||||
profileModule =
|
||||
if profile == "server" then ./common/server-node.nix
|
||||
else if profile == "workstation" then ./common/workstation-node.nix
|
||||
else if profile == "desktop" then ./common/desktop-node.nix
|
||||
else if profile == "cloud" then ./common/cloud-node.nix
|
||||
else null;
|
||||
# Profile parameter is only used by home-manager for user environment
|
||||
# NixOS system configuration is handled via explicit imports in host configs
|
||||
in
|
||||
nixpkgs.lib.nixosSystem {
|
||||
system = system;
|
||||
@@ -80,7 +81,7 @@
|
||||
(
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
nixpkgs.overlays = [ overlay-unstable overlay-browser-previews ];
|
||||
nixpkgs.overlays = [ overlay-unstable overlay-browser-previews overlay-custom ];
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
}
|
||||
)
|
||||
@@ -88,24 +89,30 @@
|
||||
sops-nix.nixosModules.sops
|
||||
impermanence.nixosModules.impermanence
|
||||
home-manager.nixosModules.home-manager
|
||||
{
|
||||
home-manager = {
|
||||
useGlobalPkgs = true;
|
||||
useUserPackages = true;
|
||||
users.ppetru = {
|
||||
imports = [
|
||||
(inputs.impermanence + "/home-manager.nix")
|
||||
inputs.nix-index-database.homeModules.nix-index
|
||||
inputs.nixvim.homeModules.nixvim
|
||||
./home
|
||||
];
|
||||
(
|
||||
{ lib, ... }:
|
||||
{
|
||||
home-manager = {
|
||||
useGlobalPkgs = true;
|
||||
useUserPackages = true;
|
||||
users.ppetru = {
|
||||
imports = [
|
||||
inputs.nix-index-database.homeModules.nix-index
|
||||
inputs.nixvim.homeModules.nixvim
|
||||
./home
|
||||
] ++ lib.optionals (profile == "desktop") [
|
||||
nix-colors.homeManagerModules.default
|
||||
];
|
||||
};
|
||||
extraSpecialArgs = {
|
||||
inherit profile nix-colors;
|
||||
};
|
||||
};
|
||||
extraSpecialArgs = {
|
||||
inherit profile;
|
||||
};
|
||||
};
|
||||
}
|
||||
] ++ nixpkgs.lib.optional (profileModule != null) profileModule ++ modules;
|
||||
}
|
||||
)
|
||||
] ++ nixpkgs.lib.optionals (profile == "desktop") [
|
||||
./common/desktop
|
||||
] ++ modules;
|
||||
specialArgs = {
|
||||
inherit inputs self;
|
||||
};
|
||||
@@ -136,16 +143,18 @@
|
||||
in
|
||||
{
|
||||
nixosConfigurations = {
|
||||
c1 = mkHost "x86_64-linux" "server" [ ./hosts/c1 ];
|
||||
c2 = mkHost "x86_64-linux" "server" [ ./hosts/c2 ];
|
||||
c3 = mkHost "x86_64-linux" "server" [ ./hosts/c3 ];
|
||||
c1 = mkHost "x86_64-linux" "minimal" [ ./hosts/c1 ];
|
||||
c2 = mkHost "x86_64-linux" "minimal" [ ./hosts/c2 ];
|
||||
c3 = mkHost "x86_64-linux" "minimal" [ ./hosts/c3 ];
|
||||
alo-cloud-1 = mkHost "aarch64-linux" "cloud" [ ./hosts/alo-cloud-1 ];
|
||||
zippy = mkHost "x86_64-linux" "workstation" [
|
||||
ethereum-nix.nixosModules.default
|
||||
./hosts/zippy
|
||||
];
|
||||
zippy = mkHost "x86_64-linux" "minimal" [ ./hosts/zippy ];
|
||||
chilly = mkHost "x86_64-linux" "workstation" [ ./hosts/chilly ];
|
||||
sparky = mkHost "x86_64-linux" "desktop" [ ./hosts/sparky ];
|
||||
sparky = mkHost "x86_64-linux" "minimal" [ ./hosts/sparky ];
|
||||
beefy = mkHost "x86_64-linux" "desktop" [ ./hosts/beefy ];
|
||||
stinky = mkHost "aarch64-linux" "minimal" [
|
||||
nixos-hardware.nixosModules.raspberry-pi-4
|
||||
./hosts/stinky
|
||||
];
|
||||
};
|
||||
|
||||
deploy = {
|
||||
@@ -172,7 +181,8 @@
|
||||
};
|
||||
};
|
||||
alo-cloud-1 = {
|
||||
hostname = "49.13.163.72";
|
||||
hostname = "alo-cloud-1";
|
||||
#hostname = "49.13.163.72";
|
||||
profiles = {
|
||||
system = {
|
||||
user = "root";
|
||||
@@ -207,9 +217,54 @@
|
||||
};
|
||||
};
|
||||
};
|
||||
beefy = {
|
||||
hostname = "beefy";
|
||||
profiles = {
|
||||
system = {
|
||||
user = "root";
|
||||
path = (deployPkgsFor "x86_64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.beefy;
|
||||
};
|
||||
};
|
||||
};
|
||||
stinky = {
|
||||
hostname = "stinky";
|
||||
profiles = {
|
||||
system = {
|
||||
user = "root";
|
||||
path = (deployPkgsFor "aarch64-linux").deploy-rs.lib.activate.nixos self.nixosConfigurations.stinky;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# SD card image for stinky (Raspberry Pi 4)
|
||||
packages.aarch64-linux.stinky-sdImage = self.nixosConfigurations.stinky.config.system.build.sdImage;
|
||||
|
||||
# Apps - utility scripts
|
||||
apps.x86_64-linux.diff-configs = {
|
||||
type = "app";
|
||||
program = "${(pkgsFor "x86_64-linux").writeShellScriptBin "diff-configs" (builtins.readFile ./scripts/diff-configs.sh)}/bin/diff-configs";
|
||||
};
|
||||
|
||||
apps.aarch64-linux.diff-configs = {
|
||||
type = "app";
|
||||
program = "${(pkgsFor "aarch64-linux").writeShellScriptBin "diff-configs" (builtins.readFile ./scripts/diff-configs.sh)}/bin/diff-configs";
|
||||
};
|
||||
|
||||
# Development shells
|
||||
devShells.x86_64-linux.default = (pkgsFor "x86_64-linux").mkShell {
|
||||
packages = with (pkgsFor "x86_64-linux"); [
|
||||
nvd
|
||||
];
|
||||
};
|
||||
|
||||
devShells.aarch64-linux.default = (pkgsFor "aarch64-linux").mkShell {
|
||||
packages = with (pkgsFor "aarch64-linux"); [
|
||||
nvd
|
||||
];
|
||||
};
|
||||
|
||||
checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
|
||||
|
||||
formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-rfc-style;
|
||||
|
||||
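Note: in the flattened side-by-side view above it is easy to lose which branch of mkHost survives. As the hunks read, the new version drops the profileModule lookup entirely: the profile string now only drives the home-manager imports and the desktop-only ./common/desktop append, while NixOS system modules come from explicit host imports. A rough, illustrative expansion of mkHost "x86_64-linux" "desktop" [ ./hosts/beefy ] under that reading (abridged, not generated output from the flake):

nixpkgs.lib.nixosSystem {
  system = "x86_64-linux";
  modules = [
    { nixpkgs.overlays = [ overlay-unstable overlay-browser-previews overlay-custom ];
      nixpkgs.config.allowUnfree = true; }
    sops-nix.nixosModules.sops
    impermanence.nixosModules.impermanence
    home-manager.nixosModules.home-manager
    # inline home-manager module: users.ppetru imports ./home plus
    # nix-colors.homeManagerModules.default because profile == "desktop"
    ./common/desktop            # appended only when profile == "desktop"
    ./hosts/beefy               # the host config imports the rest explicitly
  ];
  specialArgs = { inherit inputs self; };
}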
@@ -1,6 +1,14 @@
|
||||
{ pkgs, profile ? "cli", ... }:
|
||||
{ pkgs, lib, profile ? "cli", ... }:
|
||||
let
|
||||
# Handle both file and directory imports for profiles
|
||||
# desktop is a directory, others are files
|
||||
profilePath =
|
||||
if builtins.pathExists ./programs/${profile}/default.nix
|
||||
then ./programs/${profile}
|
||||
else ./programs/${profile}.nix;
|
||||
in
|
||||
{
|
||||
imports = [ ./programs/${profile}.nix ];
|
||||
imports = [ profilePath ];
|
||||
|
||||
home = {
|
||||
packages = (import ./packages.nix { inherit pkgs profile; }).packages;
|
||||
@@ -12,7 +20,7 @@
|
||||
MOSH_SERVER_NETWORK_TMOUT = 604800;
|
||||
NOMAD_ADDR = "http://nomad.service.consul:4646";
|
||||
LESS = "-F -i -M -+S -R -w -X -z-4";
|
||||
SYSTEMD_LESS = "FiM+SRwXz-4";
|
||||
SYSTEMD_LESS = "FiM+SRwX";
|
||||
NIX_LD = "${pkgs.glibc}/lib/ld-linux-x86-64.so.2";
|
||||
NIX_LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
|
||||
pkgs.stdenv.cc.cc
|
||||
@@ -27,22 +35,12 @@
|
||||
reload-home-manager-config = "home-manager switch --flake ${builtins.toString ./.}";
|
||||
};
|
||||
|
||||
persistence."/persist/home/ppetru" = {
|
||||
directories = [
|
||||
".cache/nix"
|
||||
".cache/nix-index"
|
||||
".claude/"
|
||||
".codex/"
|
||||
".config/io.datasette.llm/"
|
||||
".config/sops/"
|
||||
".docker/"
|
||||
".local/share/direnv"
|
||||
".local/share/fish"
|
||||
".ssh"
|
||||
"projects"
|
||||
];
|
||||
files = [ ];
|
||||
allowOther = true;
|
||||
};
|
||||
file.".ssh/rc".text = ''
|
||||
#!/bin/sh
|
||||
if test "$SSH_AUTH_SOCK"; then
|
||||
ln -sf "$SSH_AUTH_SOCK" "$HOME/.ssh/ssh_auth_sock"
|
||||
fi
|
||||
'';
|
||||
file.".ssh/rc".executable = true;
|
||||
};
|
||||
}
|
||||
|
||||
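Note: the profilePath fallback in the home.nix hunk above is easiest to read with concrete values; a small illustration using profile names from this repo:

# profile = "desktop" -> ./programs/desktop/default.nix exists -> imports the directory
# profile = "server"  -> no default.nix under ./programs/server/ -> imports ./programs/server.nix
# The same evaluation, written out:
let
  profile = "desktop";
  profilePath =
    if builtins.pathExists ./programs/${profile}/default.nix
    then ./programs/${profile}
    else ./programs/${profile}.nix;
in
profilePath   # => ./programs/desktop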
@@ -1,17 +1,31 @@
|
||||
# ABOUTME: Desktop profile package list
|
||||
# ABOUTME: Extends workstation with GUI and Wayland tools
|
||||
{ pkgs }:
|
||||
let
|
||||
workstationProfile = import ./workstation.nix { inherit pkgs; };
|
||||
|
||||
# Hyprland ecosystem packages
|
||||
hyprlandPkgs = with pkgs; [
|
||||
hyprshot
|
||||
hyprpicker
|
||||
hyprsunset
|
||||
brightnessctl
|
||||
pamixer
|
||||
playerctl
|
||||
gnome-themes-extra
|
||||
pavucontrol
|
||||
wl-clip-persist
|
||||
clipse
|
||||
];
|
||||
|
||||
# Desktop GUI applications
|
||||
desktopPkgs = with pkgs; [
|
||||
browser-previews.google-chrome
|
||||
foot # Wayland-native terminal emulator
|
||||
wofi # Application launcher for Wayland
|
||||
nautilus
|
||||
blueberry
|
||||
libnotify
|
||||
];
|
||||
in
|
||||
{
|
||||
packages = workstationProfile.packages ++ desktopPkgs;
|
||||
environment.persistence."/persist/home/ppetru".directories = [
|
||||
".cache"
|
||||
".config/google-chrome"
|
||||
];
|
||||
packages = workstationProfile.packages ++ hyprlandPkgs ++ desktopPkgs;
|
||||
}
|
||||
|
||||
5 home/profiles/minimal.nix (Normal file)
@@ -0,0 +1,5 @@
|
||||
{ pkgs }:
|
||||
{
|
||||
# Minimal profile: reuses server.nix for basic package list
|
||||
packages = (import ./server.nix { inherit pkgs; }).packages;
|
||||
}
|
||||
@@ -13,7 +13,7 @@ let
|
||||
fishPkgs = with pkgs.fishPlugins; [
|
||||
pure
|
||||
# don't add failed commands to history
|
||||
sponge
|
||||
# sponge
|
||||
transient-fish
|
||||
];
|
||||
in
|
||||
|
||||
@@ -3,6 +3,10 @@ let
|
||||
serverProfile = import ./server.nix { inherit pkgs; };
|
||||
|
||||
cliPkgs = with pkgs; [
|
||||
ast-grep
|
||||
yq
|
||||
unstable.amp-cli
|
||||
unstable.beads
|
||||
unstable.claude-code
|
||||
unstable.codex
|
||||
unstable.gemini-cli
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ ./workstation.nix ];
|
||||
|
||||
# Hyprland window manager configuration
|
||||
wayland.windowManager.hyprland = {
|
||||
enable = true;
|
||||
settings = {
|
||||
monitor = "DP-1,preferred,auto,auto";
|
||||
|
||||
# Remap CapsLock to Super (Mod4)
|
||||
"$mod" = "SUPER";
|
||||
|
||||
input = {
|
||||
kb_options = "caps:super";
|
||||
follow_mouse = 1;
|
||||
sensitivity = 0;
|
||||
};
|
||||
|
||||
master = {
|
||||
new_status = "master";
|
||||
};
|
||||
|
||||
# Key bindings
|
||||
bind = [
|
||||
# Application launchers
|
||||
"$mod, Q, exec, foot"
|
||||
"$mod, D, exec, wofi --show drun"
|
||||
"$mod SHIFT, D, exec, wofi --show run"
|
||||
"$mod, C, killactive,"
|
||||
"$mod SHIFT, E, exit,"
|
||||
"$mod, V, togglefloating,"
|
||||
"$mod, P, pseudo,"
|
||||
"$mod, J, togglesplit,"
|
||||
|
||||
# Move focus with mod + arrow keys
|
||||
"$mod, left, movefocus, l"
|
||||
"$mod, right, movefocus, r"
|
||||
"$mod, up, movefocus, u"
|
||||
"$mod, down, movefocus, d"
|
||||
|
||||
# Move focus with mod + hjkl (vim-style)
|
||||
"$mod, h, movefocus, l"
|
||||
"$mod, l, movefocus, r"
|
||||
"$mod, k, movefocus, u"
|
||||
"$mod, j, movefocus, d"
|
||||
|
||||
# Switch workspaces with mod + [0-9]
|
||||
"$mod, 1, workspace, 1"
|
||||
"$mod, 2, workspace, 2"
|
||||
"$mod, 3, workspace, 3"
|
||||
"$mod, 4, workspace, 4"
|
||||
"$mod, 5, workspace, 5"
|
||||
"$mod, 6, workspace, 6"
|
||||
"$mod, 7, workspace, 7"
|
||||
"$mod, 8, workspace, 8"
|
||||
"$mod, 9, workspace, 9"
|
||||
"$mod, 0, workspace, 10"
|
||||
|
||||
# Move active window to a workspace with mod + SHIFT + [0-9]
|
||||
"$mod SHIFT, 1, movetoworkspace, 1"
|
||||
"$mod SHIFT, 2, movetoworkspace, 2"
|
||||
"$mod SHIFT, 3, movetoworkspace, 3"
|
||||
"$mod SHIFT, 4, movetoworkspace, 4"
|
||||
"$mod SHIFT, 5, movetoworkspace, 5"
|
||||
"$mod SHIFT, 6, movetoworkspace, 6"
|
||||
"$mod SHIFT, 7, movetoworkspace, 7"
|
||||
"$mod SHIFT, 8, movetoworkspace, 8"
|
||||
"$mod SHIFT, 9, movetoworkspace, 9"
|
||||
"$mod SHIFT, 0, movetoworkspace, 10"
|
||||
|
||||
# Scroll through existing workspaces with mod + scroll
|
||||
"$mod, mouse_down, workspace, e+1"
|
||||
"$mod, mouse_up, workspace, e-1"
|
||||
];
|
||||
|
||||
# Mouse bindings
|
||||
bindm = [
|
||||
"$mod, mouse:272, movewindow"
|
||||
"$mod, mouse:273, resizewindow"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
104 home/programs/desktop/btop.nix (Normal file)
@@ -0,0 +1,104 @@
|
||||
# ABOUTME: Btop system monitor configuration with nix-colors theming
|
||||
# ABOUTME: Creates a custom theme file and configures btop settings
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
palette = config.colorScheme.palette;
|
||||
in
|
||||
{
|
||||
home.file.".config/btop/themes/${cfg.theme}.theme".text = ''
|
||||
# Main text color
|
||||
theme[main_fg]="${palette.base05}"
|
||||
|
||||
# Title color for boxes
|
||||
theme[title]="${palette.base05}"
|
||||
|
||||
# Highlight color for keyboard shortcuts
|
||||
theme[hi_fg]="${palette.base0D}"
|
||||
|
||||
# Background color of selected item in processes box
|
||||
theme[selected_bg]="${palette.base01}"
|
||||
|
||||
# Foreground color of selected item in processes box
|
||||
theme[selected_fg]="${palette.base05}"
|
||||
|
||||
# Color of inactive/disabled text
|
||||
theme[inactive_fg]="${palette.base04}"
|
||||
|
||||
# Misc colors for processes box
|
||||
theme[proc_misc]="${palette.base0D}"
|
||||
|
||||
# Box outline colors
|
||||
theme[cpu_box]="${palette.base0B}"
|
||||
theme[mem_box]="${palette.base09}"
|
||||
theme[net_box]="${palette.base0E}"
|
||||
theme[proc_box]="${palette.base0C}"
|
||||
|
||||
# Box divider line
|
||||
theme[div_line]="${palette.base04}"
|
||||
|
||||
# Temperature graph colors
|
||||
theme[temp_start]="${palette.base0B}"
|
||||
theme[temp_mid]="${palette.base0A}"
|
||||
theme[temp_end]="${palette.base08}"
|
||||
|
||||
# CPU graph colors
|
||||
theme[cpu_start]="${palette.base0B}"
|
||||
theme[cpu_mid]="${palette.base0A}"
|
||||
theme[cpu_end]="${palette.base08}"
|
||||
|
||||
# Mem/Disk meters
|
||||
theme[free_start]="${palette.base0B}"
|
||||
theme[cached_start]="${palette.base0A}"
|
||||
theme[available_start]="${palette.base09}"
|
||||
theme[used_start]="${palette.base08}"
|
||||
|
||||
# Network graph colors
|
||||
theme[download_start]="${palette.base0E}"
|
||||
theme[download_mid]="${palette.base0D}"
|
||||
theme[download_end]="${palette.base0C}"
|
||||
theme[upload_start]="${palette.base0E}"
|
||||
theme[upload_mid]="${palette.base0D}"
|
||||
theme[upload_end]="${palette.base0C}"
|
||||
'';
|
||||
|
||||
programs.btop = {
|
||||
enable = true;
|
||||
settings = {
|
||||
color_theme = cfg.theme;
|
||||
theme_background = false;
|
||||
truecolor = true;
|
||||
force_tty = false;
|
||||
vim_keys = true;
|
||||
rounded_corners = true;
|
||||
graph_symbol = "braille";
|
||||
shown_boxes = "cpu mem net proc";
|
||||
update_ms = 2000;
|
||||
proc_sorting = "cpu lazy";
|
||||
proc_colors = true;
|
||||
proc_gradient = false;
|
||||
proc_per_core = false;
|
||||
proc_mem_bytes = true;
|
||||
proc_cpu_graphs = true;
|
||||
show_uptime = true;
|
||||
check_temp = true;
|
||||
show_coretemp = true;
|
||||
temp_scale = "celsius";
|
||||
show_cpu_freq = true;
|
||||
clock_format = "%X";
|
||||
background_update = true;
|
||||
mem_graphs = true;
|
||||
show_swap = true;
|
||||
swap_disk = true;
|
||||
show_disks = true;
|
||||
only_physical = true;
|
||||
use_fstab = true;
|
||||
show_io_stat = true;
|
||||
net_auto = true;
|
||||
net_sync = true;
|
||||
show_battery = true;
|
||||
log_level = "WARNING";
|
||||
};
|
||||
};
|
||||
}
|
||||
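Note on how the two halves of btop.nix above connect (theme name taken from config.nix later in this diff):

# home.file writes the generated theme to
#   ~/.config/btop/themes/tokyo-night.theme        (for cfg.theme = "tokyo-night")
# and programs.btop.settings.color_theme = "tokyo-night" points btop at that
# same name, so changing cfg.theme re-themes btop without touching this file.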
21 home/programs/desktop/config.nix (Normal file)
@@ -0,0 +1,21 @@
|
||||
# ABOUTME: Shared configuration values for desktop environment
|
||||
# ABOUTME: Centralizes user info, theme, fonts, and display settings
|
||||
|
||||
{
|
||||
user = {
|
||||
fullName = "Petru Paler";
|
||||
email = "petru@paler.net";
|
||||
};
|
||||
|
||||
theme = "tokyo-night";
|
||||
base16Theme = "tokyo-night-dark";
|
||||
|
||||
primaryFont = "Liberation Sans 11";
|
||||
monoFont = "CaskaydiaMono Nerd Font";
|
||||
|
||||
scale = 1.5;
|
||||
monitors = [ "DP-1,preferred,auto,1.5" ];
|
||||
|
||||
# Wallpaper for tokyo-night theme
|
||||
wallpaper = "1-Pawel-Czerwinski-Abstract-Purple-Blue.jpg";
|
||||
}
|
||||
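Note: config.nix is consumed with a plain import (cfg = import ./config.nix) rather than through module options, as the sibling modules below show. A minimal illustrative consumer follows; the foot font option is only an example and is not part of this repo:

{ config, pkgs, ... }:
let
  cfg = import ./config.nix;
in
{
  # Reuse the shared font instead of repeating the literal in every module.
  programs.foot.settings.main.font = "${cfg.monoFont}:size=11";
}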
59 home/programs/desktop/default.nix (Normal file)
@@ -0,0 +1,59 @@
|
||||
# ABOUTME: Desktop environment home-manager configuration
|
||||
# ABOUTME: Imports all desktop modules and sets up nix-colors theming
|
||||
|
||||
{ config, pkgs, lib, nix-colors, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../workstation.nix
|
||||
./ghostty.nix
|
||||
./hyprland
|
||||
./waybar.nix
|
||||
./wofi.nix
|
||||
./mako.nix
|
||||
./hyprpaper.nix
|
||||
./hypridle.nix
|
||||
./hyprlock.nix
|
||||
./starship.nix
|
||||
./vscode.nix
|
||||
./btop.nix
|
||||
./git.nix
|
||||
];
|
||||
|
||||
# Set up nix-colors with our theme
|
||||
colorScheme = nix-colors.colorSchemes.${cfg.base16Theme};
|
||||
|
||||
# Override ghostty to use unstable version (1.2.0+) for ssh-terminfo support
|
||||
programs.ghostty.package = pkgs.unstable.ghostty;
|
||||
|
||||
# Extend ghostty configuration
|
||||
programs.ghostty.settings = {
|
||||
shell-integration-features = "ssh-terminfo";
|
||||
};
|
||||
|
||||
# GTK theme (dark for tokyo-night)
|
||||
gtk = {
|
||||
enable = true;
|
||||
theme = {
|
||||
name = "Adwaita-dark";
|
||||
package = pkgs.gnome-themes-extra;
|
||||
};
|
||||
};
|
||||
|
||||
# Enable neovim (placeholder for future config)
|
||||
programs.neovim.enable = true;
|
||||
|
||||
# direnv
|
||||
programs.direnv = {
|
||||
enable = true;
|
||||
nix-direnv.enable = true;
|
||||
};
|
||||
|
||||
# zoxide (directory jumping)
|
||||
programs.zoxide = {
|
||||
enable = true;
|
||||
enableBashIntegration = true;
|
||||
};
|
||||
}
|
||||
60 home/programs/desktop/ghostty.nix (Normal file)
@@ -0,0 +1,60 @@
|
||||
# ABOUTME: Ghostty terminal emulator configuration with nix-colors theming
|
||||
# ABOUTME: Creates a custom color theme from the nix-colors palette
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
palette = config.colorScheme.palette;
|
||||
in
|
||||
{
|
||||
programs.ghostty = {
|
||||
enable = true;
|
||||
settings = {
|
||||
window-padding-x = 14;
|
||||
window-padding-y = 14;
|
||||
background-opacity = 0.95;
|
||||
window-decoration = "none";
|
||||
|
||||
font-family = cfg.monoFont;
|
||||
font-size = 12;
|
||||
|
||||
theme = "desktop-theme";
|
||||
keybind = [
|
||||
"ctrl+k=reset"
|
||||
];
|
||||
};
|
||||
themes = {
|
||||
desktop-theme = {
|
||||
background = "#${palette.base00}";
|
||||
foreground = "#${palette.base05}";
|
||||
|
||||
selection-background = "#${palette.base02}";
|
||||
selection-foreground = "#${palette.base00}";
|
||||
palette = [
|
||||
"0=#${palette.base00}"
|
||||
"1=#${palette.base08}"
|
||||
"2=#${palette.base0B}"
|
||||
"3=#${palette.base0A}"
|
||||
"4=#${palette.base0D}"
|
||||
"5=#${palette.base0E}"
|
||||
"6=#${palette.base0C}"
|
||||
"7=#${palette.base05}"
|
||||
"8=#${palette.base03}"
|
||||
"9=#${palette.base08}"
|
||||
"10=#${palette.base0B}"
|
||||
"11=#${palette.base0A}"
|
||||
"12=#${palette.base0D}"
|
||||
"13=#${palette.base0E}"
|
||||
"14=#${palette.base0C}"
|
||||
"15=#${palette.base07}"
|
||||
"16=#${palette.base09}"
|
||||
"17=#${palette.base0F}"
|
||||
"18=#${palette.base01}"
|
||||
"19=#${palette.base02}"
|
||||
"20=#${palette.base04}"
|
||||
"21=#${palette.base06}"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
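Note on the palette list in ghostty.nix above (reading stated from the base16 convention; the indices are ghostty's 256-color palette slots):

# Slots 0-7 are the normal ANSI colors and 8-15 the bright variants, so e.g.
# "1=#${palette.base08}" makes ANSI red follow the scheme's base08. Slots
# 16-21 carry the remaining base16 accents and greys (base09, base0F, base01,
# base02, base04, base06) for programs that index beyond the first 16 colors.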
24 home/programs/desktop/git.nix (Normal file)
@@ -0,0 +1,24 @@
|
||||
# ABOUTME: Git and GitHub CLI configuration
|
||||
# ABOUTME: Sets up git with user info and gh CLI integration
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
in
|
||||
{
|
||||
programs.git = {
|
||||
enable = true;
|
||||
settings = {
|
||||
user.name = cfg.user.fullName;
|
||||
user.email = cfg.user.email;
|
||||
credential.helper = "store";
|
||||
};
|
||||
};
|
||||
|
||||
programs.gh = {
|
||||
enable = true;
|
||||
gitCredentialHelper = {
|
||||
enable = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
27 home/programs/desktop/hypridle.nix (Normal file)
@@ -0,0 +1,27 @@
|
||||
# ABOUTME: Hypridle idle daemon configuration
|
||||
# ABOUTME: Handles screen locking and DPMS after idle timeout
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
services.hypridle = {
|
||||
enable = true;
|
||||
settings = {
|
||||
general = {
|
||||
lock_cmd = "pidof hyprlock || hyprlock";
|
||||
before_sleep_cmd = "loginctl lock-session";
|
||||
after_sleep_cmd = "hyprctl dispatch dpms on";
|
||||
};
|
||||
listener = [
|
||||
{
|
||||
timeout = 300;
|
||||
on-timeout = "loginctl lock-session";
|
||||
}
|
||||
{
|
||||
timeout = 330;
|
||||
on-timeout = "hyprctl dispatch dpms off";
|
||||
on-resume = "hyprctl dispatch dpms on && brightnessctl -r";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
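Note: the listener blocks in hypridle.nix above form a simple idle timeline:

# t+300s    -> loginctl lock-session (general.lock_cmd then starts hyprlock)
# t+330s    -> displays blanked via `hyprctl dispatch dpms off`
# on resume -> dpms back on and brightness restored with `brightnessctl -r`
# suspend/resume are covered separately by the general.* commands above.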
17 home/programs/desktop/hyprland/autostart.nix (Normal file)
@@ -0,0 +1,17 @@
|
||||
# ABOUTME: Hyprland autostart configuration
|
||||
# ABOUTME: Defines programs to run at Hyprland startup
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
wayland.windowManager.hyprland.settings = {
|
||||
exec-once = [
|
||||
"hyprsunset"
|
||||
"systemctl --user start hyprpolkitagent"
|
||||
"wl-clip-persist --clipboard regular & clipse -listen"
|
||||
];
|
||||
|
||||
exec = [
|
||||
"pkill -SIGUSR2 waybar || waybar"
|
||||
];
|
||||
};
|
||||
}
|
||||
99 home/programs/desktop/hyprland/bindings.nix (Normal file)
@@ -0,0 +1,99 @@
|
||||
# ABOUTME: Hyprland keybindings configuration
|
||||
# ABOUTME: Defines keyboard and mouse shortcuts for window management
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
wayland.windowManager.hyprland.settings = {
|
||||
bind = [
|
||||
# Application launchers
|
||||
"$mod, Space, exec, $menu"
|
||||
"$mod, Return, exec, $terminal"
|
||||
"$mod, E, exec, $fileManager"
|
||||
"$mod, B, exec, $browser"
|
||||
|
||||
# Window management
|
||||
"$mod, W, killactive,"
|
||||
"$mod, BackSpace, killactive,"
|
||||
"$mod, V, togglefloating,"
|
||||
"$mod SHIFT, equal, fullscreen,"
|
||||
"$mod, J, togglesplit,"
|
||||
"$mod, P, pseudo,"
|
||||
|
||||
# Focus navigation
|
||||
"$mod, left, movefocus, l"
|
||||
"$mod, right, movefocus, r"
|
||||
"$mod, up, movefocus, u"
|
||||
"$mod, down, movefocus, d"
|
||||
|
||||
# Workspace switching
|
||||
"$mod, 1, workspace, 1"
|
||||
"$mod, 2, workspace, 2"
|
||||
"$mod, 3, workspace, 3"
|
||||
"$mod, 4, workspace, 4"
|
||||
"$mod, 5, workspace, 5"
|
||||
"$mod, 6, workspace, 6"
|
||||
"$mod, 7, workspace, 7"
|
||||
"$mod, 8, workspace, 8"
|
||||
"$mod, 9, workspace, 9"
|
||||
"$mod, 0, workspace, 10"
|
||||
|
||||
# Move window to workspace
|
||||
"$mod SHIFT, 1, movetoworkspace, 1"
|
||||
"$mod SHIFT, 2, movetoworkspace, 2"
|
||||
"$mod SHIFT, 3, movetoworkspace, 3"
|
||||
"$mod SHIFT, 4, movetoworkspace, 4"
|
||||
"$mod SHIFT, 5, movetoworkspace, 5"
|
||||
"$mod SHIFT, 6, movetoworkspace, 6"
|
||||
"$mod SHIFT, 7, movetoworkspace, 7"
|
||||
"$mod SHIFT, 8, movetoworkspace, 8"
|
||||
"$mod SHIFT, 9, movetoworkspace, 9"
|
||||
"$mod SHIFT, 0, movetoworkspace, 10"
|
||||
|
||||
# Workspace navigation
|
||||
"$mod, comma, workspace, m-1"
|
||||
"$mod, period, workspace, m+1"
|
||||
|
||||
# Window resize
|
||||
"$mod, minus, splitratio, -0.1"
|
||||
"$mod, equal, splitratio, +0.1"
|
||||
|
||||
# Lock screen
|
||||
"$mod, Escape, exec, loginctl lock-session"
|
||||
|
||||
# Screenshots
|
||||
", Print, exec, hyprshot -m region"
|
||||
"SHIFT, Print, exec, hyprshot -m window"
|
||||
"CTRL, Print, exec, hyprshot -m output"
|
||||
|
||||
# Color picker
|
||||
"$mod SHIFT, C, exec, hyprpicker -a"
|
||||
|
||||
# Clipboard manager
|
||||
"$mod SHIFT, V, exec, ghostty --class=clipse -e clipse"
|
||||
];
|
||||
|
||||
bindm = [
|
||||
# Mouse bindings for window management
|
||||
"$mod, mouse:272, movewindow"
|
||||
"$mod, mouse:273, resizewindow"
|
||||
];
|
||||
|
||||
binde = [
|
||||
# Repeatable bindings for media controls
|
||||
", XF86AudioRaiseVolume, exec, wpctl set-volume -l 1.5 @DEFAULT_AUDIO_SINK@ 5%+"
|
||||
", XF86AudioLowerVolume, exec, wpctl set-volume @DEFAULT_AUDIO_SINK@ 5%-"
|
||||
", XF86AudioMute, exec, wpctl set-mute @DEFAULT_AUDIO_SINK@ toggle"
|
||||
|
||||
# Brightness controls
|
||||
", XF86MonBrightnessUp, exec, brightnessctl s +5%"
|
||||
", XF86MonBrightnessDown, exec, brightnessctl s 5%-"
|
||||
];
|
||||
|
||||
bindl = [
|
||||
# Media player controls
|
||||
", XF86AudioNext, exec, playerctl next"
|
||||
", XF86AudioPrev, exec, playerctl previous"
|
||||
", XF86AudioPlay, exec, playerctl play-pause"
|
||||
];
|
||||
};
|
||||
}
|
||||
39 home/programs/desktop/hyprland/default.nix (Normal file)
@@ -0,0 +1,39 @@
|
||||
# ABOUTME: Hyprland window manager home-manager configuration
|
||||
# ABOUTME: Imports all hyprland submodules for complete WM setup
|
||||
|
||||
{ config, pkgs, lib, ... }:
|
||||
let
|
||||
cfg = import ../config.nix;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./bindings.nix
|
||||
./autostart.nix
|
||||
./input.nix
|
||||
./looknfeel.nix
|
||||
./windows.nix
|
||||
./envs.nix
|
||||
];
|
||||
|
||||
wayland.windowManager.hyprland = {
|
||||
enable = true;
|
||||
systemd.enable = true;
|
||||
|
||||
settings = {
|
||||
# Monitor configuration
|
||||
monitor = cfg.monitors;
|
||||
|
||||
# Default applications
|
||||
"$terminal" = "ghostty";
|
||||
"$fileManager" = "nautilus";
|
||||
"$browser" = "google-chrome-stable --new-window --ozone-platform=wayland";
|
||||
"$menu" = "wofi --show drun";
|
||||
|
||||
# Mod key
|
||||
"$mod" = "SUPER";
|
||||
};
|
||||
};
|
||||
|
||||
# Hyprland polkit agent for privilege escalation
|
||||
services.hyprpolkitagent.enable = true;
|
||||
}
|
||||
56 home/programs/desktop/hyprland/envs.nix (Normal file)
@@ -0,0 +1,56 @@
|
||||
# ABOUTME: Hyprland environment variables configuration
|
||||
# ABOUTME: Sets up Wayland, cursor, and application environment variables
|
||||
|
||||
{ config, lib, pkgs, osConfig ? { }, ... }:
|
||||
let
|
||||
cfg = import ../config.nix;
|
||||
hasNvidiaDrivers = builtins.elem "nvidia" (osConfig.services.xserver.videoDrivers or []);
|
||||
nvidiaEnv = [
|
||||
"NVD_BACKEND,direct"
|
||||
"LIBVA_DRIVER_NAME,nvidia"
|
||||
"__GLX_VENDOR_LIBRARY_NAME,nvidia"
|
||||
];
|
||||
in
|
||||
{
|
||||
wayland.windowManager.hyprland.settings = {
|
||||
env = (lib.optionals hasNvidiaDrivers nvidiaEnv) ++ [
|
||||
"GDK_SCALE,${toString cfg.scale}"
|
||||
|
||||
# Cursor size and theme
|
||||
"XCURSOR_SIZE,24"
|
||||
"HYPRCURSOR_SIZE,24"
|
||||
"XCURSOR_THEME,Adwaita"
|
||||
"HYPRCURSOR_THEME,Adwaita"
|
||||
|
||||
# Force Wayland for applications
|
||||
"GDK_BACKEND,wayland"
|
||||
"QT_QPA_PLATFORM,wayland"
|
||||
"QT_STYLE_OVERRIDE,kvantum"
|
||||
"SDL_VIDEODRIVER,wayland"
|
||||
"MOZ_ENABLE_WAYLAND,1"
|
||||
"ELECTRON_OZONE_PLATFORM_HINT,wayland"
|
||||
"OZONE_PLATFORM,wayland"
|
||||
|
||||
# Chromium Wayland support
|
||||
"CHROMIUM_FLAGS,\"--enable-features=UseOzonePlatform --ozone-platform=wayland --gtk-version=4\""
|
||||
|
||||
# Make .desktop files available for wofi
|
||||
"XDG_DATA_DIRS,$XDG_DATA_DIRS:$HOME/.nix-profile/share:/nix/var/nix/profiles/default/share"
|
||||
|
||||
# XCompose support
|
||||
"XCOMPOSEFILE,~/.XCompose"
|
||||
"EDITOR,nvim"
|
||||
|
||||
# GTK dark theme
|
||||
"GTK_THEME,Adwaita:dark"
|
||||
];
|
||||
|
||||
xwayland = {
|
||||
force_zero_scaling = true;
|
||||
};
|
||||
|
||||
ecosystem = {
|
||||
no_update_news = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
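Note on the osConfig guard in envs.nix above: osConfig is only populated when home-manager runs as the NixOS module (as flake.nix does via home-manager.nixosModules.home-manager), hence the `osConfig ? { }` default so standalone evaluation still works. Roughly:

# osConfig.services.xserver.videoDrivers = [ "nvidia" ]   (from the host's NixOS config)
#   -> hasNvidiaDrivers = true
#   -> lib.optionals true nvidiaEnv prepends the NVD/LIBVA/GLX variables
# On hosts without the nvidia driver (or when osConfig is absent) the list is
# simply [ ], and only the common Wayland variables are set.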
19 home/programs/desktop/hyprland/input.nix (Normal file)
@@ -0,0 +1,19 @@
|
||||
# ABOUTME: Hyprland input and gesture configuration
|
||||
# ABOUTME: Keyboard layout, mouse settings, and touchpad behavior
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
{
|
||||
wayland.windowManager.hyprland.settings = {
|
||||
input = lib.mkDefault {
|
||||
kb_layout = "us";
|
||||
kb_options = "caps:super,compose:ralt";
|
||||
|
||||
follow_mouse = 1;
|
||||
sensitivity = 0;
|
||||
|
||||
touchpad = {
|
||||
natural_scroll = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
89 home/programs/desktop/hyprland/looknfeel.nix (Normal file)
@@ -0,0 +1,89 @@
|
||||
# ABOUTME: Hyprland visual appearance configuration
|
||||
# ABOUTME: Window gaps, borders, animations, and decorations with nix-colors theming
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
palette = config.colorScheme.palette;
|
||||
hexToRgba = hex: alpha: "rgba(${hex}${alpha})";
|
||||
inactiveBorder = hexToRgba palette.base09 "aa";
|
||||
activeBorder = hexToRgba palette.base0D "aa";
|
||||
in
|
||||
{
|
||||
wayland.windowManager.hyprland.settings = {
|
||||
general = {
|
||||
gaps_in = 5;
|
||||
gaps_out = 10;
|
||||
border_size = 2;
|
||||
|
||||
"col.active_border" = activeBorder;
|
||||
"col.inactive_border" = inactiveBorder;
|
||||
|
||||
resize_on_border = false;
|
||||
allow_tearing = false;
|
||||
layout = "dwindle";
|
||||
};
|
||||
|
||||
decoration = {
|
||||
rounding = 4;
|
||||
|
||||
shadow = {
|
||||
enabled = false;
|
||||
range = 30;
|
||||
render_power = 3;
|
||||
ignore_window = true;
|
||||
color = "rgba(00000045)";
|
||||
};
|
||||
|
||||
blur = {
|
||||
enabled = true;
|
||||
size = 5;
|
||||
passes = 2;
|
||||
vibrancy = 0.1696;
|
||||
};
|
||||
};
|
||||
|
||||
animations = {
|
||||
enabled = true;
|
||||
|
||||
bezier = [
|
||||
"easeOutQuint,0.23,1,0.32,1"
|
||||
"easeInOutCubic,0.65,0.05,0.36,1"
|
||||
"linear,0,0,1,1"
|
||||
"almostLinear,0.5,0.5,0.75,1.0"
|
||||
"quick,0.15,0,0.1,1"
|
||||
];
|
||||
|
||||
animation = [
|
||||
"global, 1, 10, default"
|
||||
"border, 1, 5.39, easeOutQuint"
|
||||
"windows, 1, 4.79, easeOutQuint"
|
||||
"windowsIn, 1, 4.1, easeOutQuint, popin 87%"
|
||||
"windowsOut, 1, 1.49, linear, popin 87%"
|
||||
"fadeIn, 1, 1.73, almostLinear"
|
||||
"fadeOut, 1, 1.46, almostLinear"
|
||||
"fade, 1, 3.03, quick"
|
||||
"layers, 1, 3.81, easeOutQuint"
|
||||
"layersIn, 1, 4, easeOutQuint, fade"
|
||||
"layersOut, 1, 1.5, linear, fade"
|
||||
"fadeLayersIn, 1, 1.79, almostLinear"
|
||||
"fadeLayersOut, 1, 1.39, almostLinear"
|
||||
"workspaces, 0, 0, ease"
|
||||
];
|
||||
};
|
||||
|
||||
dwindle = {
|
||||
pseudotile = true;
|
||||
preserve_split = true;
|
||||
force_split = 2;
|
||||
};
|
||||
|
||||
master = {
|
||||
new_status = "master";
|
||||
};
|
||||
|
||||
misc = {
|
||||
disable_hyprland_logo = true;
|
||||
disable_splash_rendering = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
31 home/programs/desktop/hyprland/windows.nix (Normal file)
@@ -0,0 +1,31 @@
|
||||
# ABOUTME: Hyprland window rules configuration
|
||||
# ABOUTME: Defines per-application window behavior and layer rules
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
wayland.windowManager.hyprland.settings = {
|
||||
windowrule = [
|
||||
"suppressevent maximize, class:.*"
|
||||
"tile, class:^(chromium)$"
|
||||
"float, class:^(org.pulseaudio.pavucontrol|blueberry.py)$"
|
||||
"float, class:^(steam)$"
|
||||
"fullscreen, class:^(com.libretro.RetroArch)$"
|
||||
"opacity 0.97 0.9, class:.*"
|
||||
"opacity 1 1, class:^(chromium|google-chrome|google-chrome-unstable)$, title:.*Youtube.*"
|
||||
"opacity 1 0.97, class:^(chromium|google-chrome|google-chrome-unstable)$"
|
||||
"opacity 0.97 0.9, initialClass:^(chrome-.*-Default)$"
|
||||
"opacity 1 1, initialClass:^(chrome-youtube.*-Default)$"
|
||||
"opacity 1 1, class:^(zoom|vlc|org.kde.kdenlive|com.obsproject.Studio)$"
|
||||
"opacity 1 1, class:^(com.libretro.RetroArch|steam)$"
|
||||
"nofocus,class:^$,title:^$,xwayland:1,floating:1,fullscreen:0,pinned:0"
|
||||
"float, class:(clipse)"
|
||||
"size 622 652, class:(clipse)"
|
||||
"stayfocused, class:(clipse)"
|
||||
];
|
||||
|
||||
layerrule = [
|
||||
"blur,wofi"
|
||||
"blur,waybar"
|
||||
];
|
||||
};
|
||||
}
|
||||
70 home/programs/desktop/hyprlock.nix (Normal file)
@@ -0,0 +1,70 @@
|
||||
# ABOUTME: Hyprlock screen locker configuration with nix-colors theming
|
||||
# ABOUTME: Configures lock screen appearance with fingerprint support
|
||||
|
||||
{ config, pkgs, nix-colors, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
palette = config.colorScheme.palette;
|
||||
convert = nix-colors.lib.conversions.hexToRGBString;
|
||||
wallpaperPath = "~/Pictures/Wallpapers/${cfg.wallpaper}";
|
||||
|
||||
backgroundRgb = "rgba(${convert ", " palette.base00}, 0.8)";
|
||||
surfaceRgb = "rgb(${convert ", " palette.base02})";
|
||||
foregroundRgb = "rgb(${convert ", " palette.base05})";
|
||||
foregroundMutedRgb = "rgb(${convert ", " palette.base04})";
|
||||
in
|
||||
{
|
||||
programs.hyprlock = {
|
||||
enable = true;
|
||||
settings = {
|
||||
general = {
|
||||
disable_loading_bar = true;
|
||||
no_fade_in = false;
|
||||
};
|
||||
auth = {
|
||||
fingerprint.enabled = true;
|
||||
};
|
||||
background = {
|
||||
monitor = "";
|
||||
path = wallpaperPath;
|
||||
};
|
||||
|
||||
input-field = {
|
||||
monitor = "";
|
||||
size = "600, 100";
|
||||
position = "0, 0";
|
||||
halign = "center";
|
||||
valign = "center";
|
||||
|
||||
inner_color = surfaceRgb;
|
||||
outer_color = foregroundRgb;
|
||||
outline_thickness = 4;
|
||||
|
||||
font_family = cfg.monoFont;
|
||||
font_size = 32;
|
||||
font_color = foregroundRgb;
|
||||
|
||||
placeholder_color = foregroundMutedRgb;
|
||||
placeholder_text = " Enter Password ";
|
||||
check_color = "rgba(131, 192, 146, 1.0)";
|
||||
fail_text = "Wrong";
|
||||
|
||||
rounding = 0;
|
||||
shadow_passes = 0;
|
||||
fade_on_empty = false;
|
||||
};
|
||||
|
||||
label = {
|
||||
monitor = "";
|
||||
text = "$FPRINTPROMPT";
|
||||
text_align = "center";
|
||||
color = "rgb(211, 198, 170)";
|
||||
font_size = 24;
|
||||
font_family = cfg.monoFont;
|
||||
position = "0, -100";
|
||||
halign = "center";
|
||||
valign = "center";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
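Note: the convert helper used in hyprlock.nix above (and again in waybar.nix below) turns a base16 hex value into comma-separated decimal channels; with hypothetical tokyo-night values for illustration:

# nix-colors.lib.conversions.hexToRGBString ", " "1a1b26"  => "26, 27, 38"
# so backgroundRgb evaluates to something like "rgba(26, 27, 38, 0.8)", and
# foregroundRgb to "rgb(169, 177, 214)" for a base05 of "a9b1d6".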
23 home/programs/desktop/hyprpaper.nix (Normal file)
@@ -0,0 +1,23 @@
|
||||
# ABOUTME: Hyprpaper wallpaper service configuration
|
||||
# ABOUTME: Sets up wallpaper based on theme selection
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
wallpaperPath = "~/Pictures/Wallpapers/${cfg.wallpaper}";
|
||||
in
|
||||
{
|
||||
# Copy wallpapers to Pictures directory
|
||||
home.file."Pictures/Wallpapers" = {
|
||||
source = ../../../common/desktop/assets/wallpapers;
|
||||
recursive = true;
|
||||
};
|
||||
|
||||
services.hyprpaper = {
|
||||
enable = true;
|
||||
settings = {
|
||||
preload = [ wallpaperPath ];
|
||||
wallpaper = [ ",${wallpaperPath}" ];
|
||||
};
|
||||
};
|
||||
}
|
||||
41 home/programs/desktop/mako.nix (Normal file)
@@ -0,0 +1,41 @@
|
||||
# ABOUTME: Mako notification daemon configuration with nix-colors theming
|
||||
# ABOUTME: Configures notification appearance and behavior
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
palette = config.colorScheme.palette;
|
||||
in
|
||||
{
|
||||
services.mako = {
|
||||
enable = true;
|
||||
|
||||
settings = {
|
||||
background-color = "#${palette.base00}";
|
||||
text-color = "#${palette.base05}";
|
||||
border-color = "#${palette.base04}";
|
||||
progress-color = "#${palette.base0D}";
|
||||
|
||||
width = 420;
|
||||
height = 110;
|
||||
padding = "10";
|
||||
margin = "10";
|
||||
border-size = 2;
|
||||
border-radius = 0;
|
||||
|
||||
anchor = "top-right";
|
||||
layer = "overlay";
|
||||
|
||||
default-timeout = 5000;
|
||||
ignore-timeout = false;
|
||||
max-visible = 5;
|
||||
sort = "-time";
|
||||
|
||||
group-by = "app-name";
|
||||
|
||||
actions = true;
|
||||
|
||||
format = "<b>%s</b>\\n%b";
|
||||
markup = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
7 home/programs/desktop/starship.nix (Normal file)
@@ -0,0 +1,7 @@
|
||||
# ABOUTME: Starship prompt configuration
|
||||
# ABOUTME: Enables the cross-shell prompt with default settings
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
programs.starship.enable = true;
|
||||
}
|
||||
32 home/programs/desktop/themes.nix (Normal file)
@@ -0,0 +1,32 @@
|
||||
# ABOUTME: Theme definitions mapping theme names to base16 and VSCode themes
|
||||
# ABOUTME: Used by vscode and other apps that need theme name mapping
|
||||
|
||||
{
|
||||
"tokyo-night" = {
|
||||
base16Theme = "tokyo-night-dark";
|
||||
vscodeTheme = "Tokyo Night";
|
||||
};
|
||||
"catppuccin-macchiato" = {
|
||||
vscodeTheme = "Catppuccin Macchiato";
|
||||
};
|
||||
"kanagawa" = {
|
||||
base16Theme = "kanagawa";
|
||||
vscodeTheme = "Kanagawa";
|
||||
};
|
||||
"everforest" = {
|
||||
base16Theme = "everforest";
|
||||
vscodeTheme = "Everforest Dark";
|
||||
};
|
||||
"nord" = {
|
||||
base16Theme = "nord";
|
||||
vscodeTheme = "Nord";
|
||||
};
|
||||
"gruvbox" = {
|
||||
base16Theme = "gruvbox-dark-hard";
|
||||
vscodeTheme = "Gruvbox Dark Hard";
|
||||
};
|
||||
"gruvbox-light" = {
|
||||
base16Theme = "gruvbox-light-medium";
|
||||
vscodeTheme = "Gruvbox Light Medium";
|
||||
};
|
||||
}
|
||||
54 home/programs/desktop/vscode.nix (Normal file)
@@ -0,0 +1,54 @@
|
||||
# ABOUTME: VSCode configuration with theme extensions
|
||||
# ABOUTME: Installs vim keybindings and color scheme extensions
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
themes = import ./themes.nix;
|
||||
theme = themes.${cfg.theme};
|
||||
in
|
||||
{
|
||||
programs.vscode = {
|
||||
enable = true;
|
||||
profiles.default = {
|
||||
extensions =
|
||||
with pkgs.vscode-extensions;
|
||||
[
|
||||
bbenoist.nix
|
||||
vscodevim.vim
|
||||
]
|
||||
++ pkgs.vscode-utils.extensionsFromVscodeMarketplace [
|
||||
{
|
||||
name = "everforest";
|
||||
publisher = "sainnhe";
|
||||
version = "0.3.0";
|
||||
sha256 = "sha256-nZirzVvM160ZTpBLTimL2X35sIGy5j2LQOok7a2Yc7U=";
|
||||
}
|
||||
{
|
||||
name = "tokyo-night";
|
||||
publisher = "enkia";
|
||||
version = "1.1.2";
|
||||
sha256 = "sha256-oW0bkLKimpcjzxTb/yjShagjyVTUFEg198oPbY5J2hM=";
|
||||
}
|
||||
{
|
||||
name = "kanagawa";
|
||||
publisher = "qufiwefefwoyn";
|
||||
version = "1.5.1";
|
||||
sha256 = "sha256-AGGioXcK/fjPaFaWk2jqLxovUNR59gwpotcSpGNbj1c=";
|
||||
}
|
||||
{
|
||||
name = "nord-visual-studio-code";
|
||||
publisher = "arcticicestudio";
|
||||
version = "0.19.0";
|
||||
sha256 = "sha256-awbqFv6YuYI0tzM/QbHRTUl4B2vNUdy52F4nPmv+dRU=";
|
||||
}
|
||||
{
|
||||
name = "gruvbox";
|
||||
publisher = "jdinhlife";
|
||||
version = "1.28.0";
|
||||
sha256 = "sha256-XwQzbbZU6MfYcT50/0YgQp8UaOeQskEvEQPZXG72lLk=";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
182 home/programs/desktop/waybar.nix (Normal file)
@@ -0,0 +1,182 @@
|
||||
# ABOUTME: Waybar status bar configuration with nix-colors theming
|
||||
# ABOUTME: Configures system tray, workspaces, and status indicators
|
||||
|
||||
{ config, pkgs, nix-colors, ... }:
|
||||
let
|
||||
palette = config.colorScheme.palette;
|
||||
convert = nix-colors.lib.conversions.hexToRGBString;
|
||||
backgroundRgb = "rgb(${convert ", " palette.base00})";
|
||||
foregroundRgb = "rgb(${convert ", " palette.base05})";
|
||||
in
|
||||
{
|
||||
home.file.".config/waybar/theme.css".text = ''
|
||||
@define-color background ${backgroundRgb};
|
||||
* {
|
||||
color: ${foregroundRgb};
|
||||
}
|
||||
|
||||
window#waybar {
|
||||
background-color: ${backgroundRgb};
|
||||
}
|
||||
'';
|
||||
|
||||
home.file.".config/waybar/style.css".text = ''
|
||||
@import "./theme.css";
|
||||
* {
|
||||
border: none;
|
||||
border-radius: 0;
|
||||
min-height: 0;
|
||||
font-family: CaskaydiaMono Nerd Font;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
#workspaces {
|
||||
margin-left: 7px;
|
||||
}
|
||||
|
||||
#workspaces button {
|
||||
all: initial;
|
||||
padding: 2px 6px;
|
||||
margin-right: 3px;
|
||||
}
|
||||
|
||||
#custom-dropbox,
|
||||
#cpu,
|
||||
#power-profiles-daemon,
|
||||
#battery,
|
||||
#network,
|
||||
#bluetooth,
|
||||
#wireplumber,
|
||||
#tray,
|
||||
#clock {
|
||||
background-color: transparent;
|
||||
min-width: 12px;
|
||||
margin-right: 13px;
|
||||
}
|
||||
|
||||
tooltip {
|
||||
padding: 2px;
|
||||
}
|
||||
|
||||
tooltip label {
|
||||
padding: 2px;
|
||||
}
|
||||
'';
|
||||
|
||||
programs.waybar = {
|
||||
enable = true;
|
||||
settings = [
|
||||
{
|
||||
layer = "top";
|
||||
position = "top";
|
||||
spacing = 0;
|
||||
height = 26;
|
||||
modules-left = [ "hyprland/workspaces" ];
|
||||
modules-center = [ "clock" ];
|
||||
modules-right = [
|
||||
"tray"
|
||||
"bluetooth"
|
||||
"network"
|
||||
"wireplumber"
|
||||
"cpu"
|
||||
"power-profiles-daemon"
|
||||
"battery"
|
||||
];
|
||||
"hyprland/workspaces" = {
|
||||
on-click = "activate";
|
||||
format = "{icon}";
|
||||
format-icons = {
|
||||
default = "";
|
||||
"1" = "1";
|
||||
"2" = "2";
|
||||
"3" = "3";
|
||||
"4" = "4";
|
||||
"5" = "5";
|
||||
"6" = "6";
|
||||
"7" = "7";
|
||||
"8" = "8";
|
||||
"9" = "9";
|
||||
active = "";
|
||||
};
|
||||
persistent-workspaces = {
|
||||
"1" = [ ];
|
||||
"2" = [ ];
|
||||
"3" = [ ];
|
||||
"4" = [ ];
|
||||
"5" = [ ];
|
||||
};
|
||||
};
|
||||
cpu = {
|
||||
interval = 5;
|
||||
format = "";
|
||||
on-click = "ghostty -e btop";
|
||||
};
|
||||
clock = {
|
||||
format = "{:%A %I:%M %p}";
|
||||
format-alt = "{:%d %B W%V %Y}";
|
||||
tooltip = false;
|
||||
};
|
||||
network = {
|
||||
format-icons = [ "" "" "" "" "" ];
|
||||
format = "{icon}";
|
||||
format-wifi = "{icon}";
|
||||
format-ethernet = "";
|
||||
format-disconnected = "";
|
||||
tooltip-format-wifi = "{essid} ({frequency} GHz)\n⇣{bandwidthDownBytes} ⇡{bandwidthUpBytes}";
|
||||
tooltip-format-ethernet = "⇣{bandwidthDownBytes} ⇡{bandwidthUpBytes}";
|
||||
tooltip-format-disconnected = "Disconnected";
|
||||
interval = 3;
|
||||
nospacing = 1;
|
||||
on-click = "ghostty -e nmcli";
|
||||
};
|
||||
battery = {
|
||||
interval = 5;
|
||||
format = "{capacity}% {icon}";
|
||||
format-discharging = "{icon}";
|
||||
format-charging = "{icon}";
|
||||
format-plugged = "";
|
||||
format-icons = {
|
||||
charging = [ "" "" "" "" "" "" "" "" "" "" ];
|
||||
default = [ "" "" "" "" "" "" "" "" "" "" ];
|
||||
};
|
||||
format-full = "Charged ";
|
||||
tooltip-format-discharging = "{power:>1.0f}W↓ {capacity}%";
|
||||
tooltip-format-charging = "{power:>1.0f}W↑ {capacity}%";
|
||||
states = {
|
||||
warning = 20;
|
||||
critical = 10;
|
||||
};
|
||||
};
|
||||
bluetooth = {
|
||||
format = "";
|
||||
format-disabled = "";
|
||||
format-connected = "";
|
||||
tooltip-format = "Devices connected: {num_connections}";
|
||||
on-click = "blueberry";
|
||||
};
|
||||
wireplumber = {
|
||||
format = "";
|
||||
format-muted = "";
|
||||
scroll-step = 5;
|
||||
on-click = "pavucontrol";
|
||||
tooltip-format = "Playing at {volume}%";
|
||||
on-click-right = "wpctl set-mute @DEFAULT_AUDIO_SINK@ toggle";
|
||||
max-volume = 150;
|
||||
};
|
||||
tray = {
|
||||
spacing = 13;
|
||||
};
|
||||
power-profiles-daemon = {
|
||||
format = "{icon}";
|
||||
tooltip-format = "Power profile: {profile}";
|
||||
tooltip = true;
|
||||
format-icons = {
|
||||
power-saver = "";
|
||||
balanced = "";
|
||||
performance = "";
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
||||
102 home/programs/desktop/wofi.nix (Normal file)
@@ -0,0 +1,102 @@
|
||||
# ABOUTME: Wofi application launcher configuration with nix-colors theming
|
||||
# ABOUTME: Configures the drun launcher appearance and behavior
|
||||
|
||||
{ config, pkgs, ... }:
|
||||
let
|
||||
cfg = import ./config.nix;
|
||||
palette = config.colorScheme.palette;
|
||||
in
|
||||
{
|
||||
home.file.".config/wofi/style.css".text = ''
|
||||
* {
|
||||
font-family: '${cfg.monoFont}', monospace;
|
||||
font-size: 18px;
|
||||
}
|
||||
|
||||
window {
|
||||
margin: 0px;
|
||||
padding: 20px;
|
||||
background-color: #${palette.base00};
|
||||
opacity: 0.95;
|
||||
}
|
||||
|
||||
#inner-box {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
border: none;
|
||||
background-color: #${palette.base00};
|
||||
}
|
||||
|
||||
#outer-box {
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
border: none;
|
||||
background-color: #${palette.base00};
|
||||
}
|
||||
|
||||
#scroll {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
border: none;
|
||||
background-color: #${palette.base00};
|
||||
}
|
||||
|
||||
#input {
|
||||
margin: 0;
|
||||
padding: 10px;
|
||||
border: none;
|
||||
background-color: #${palette.base00};
|
||||
color: @text;
|
||||
}
|
||||
|
||||
#input:focus {
|
||||
outline: none;
|
||||
box-shadow: none;
|
||||
border: none;
|
||||
}
|
||||
|
||||
#text {
|
||||
margin: 5px;
|
||||
border: none;
|
||||
color: #${palette.base06};
|
||||
}
|
||||
|
||||
#entry {
|
||||
background-color: #${palette.base00};
|
||||
}
|
||||
|
||||
#entry:selected {
|
||||
outline: none;
|
||||
border: none;
|
||||
}
|
||||
|
||||
#entry:selected #text {
|
||||
color: #${palette.base02};
|
||||
}
|
||||
|
||||
#entry image {
|
||||
-gtk-icon-transform: scale(0.7);
|
||||
}
|
||||
'';
|
||||
|
||||
programs.wofi = {
|
||||
enable = true;
|
||||
settings = {
|
||||
width = 600;
|
||||
height = 350;
|
||||
location = "center";
|
||||
show = "drun";
|
||||
prompt = "Search...";
|
||||
filter_rate = 100;
|
||||
allow_markup = true;
|
||||
no_actions = true;
|
||||
halign = "fill";
|
||||
orientation = "vertical";
|
||||
content_halign = "fill";
|
||||
insensitive = true;
|
||||
allow_images = true;
|
||||
image_size = 40;
|
||||
gtk_dark = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
5 home/programs/minimal.nix (Normal file)
@@ -0,0 +1,5 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# Minimal profile: reuses server.nix for basic CLI programs
|
||||
imports = [ ./server.nix ];
|
||||
}
|
||||
@@ -327,7 +327,6 @@
|
||||
enable = true;
|
||||
|
||||
shellAbbrs = {
|
||||
fix-ssh = "eval $(tmux show-env | grep ^SSH_AUTH_SOCK | sed 's/=/ /;s/^/set /')";
|
||||
diff-persist = "sudo rsync -amvxx --dry-run --no-links --exclude '/tmp/*' --exclude '/root/*' / /persist/ | rg -v '^skipping|/$'";
|
||||
};
|
||||
|
||||
@@ -340,6 +339,16 @@
|
||||
set pure_show_prefix_root_prompt true
|
||||
set sponge_regex_patterns 'password|passwd'
|
||||
'';
|
||||
|
||||
functions = {
|
||||
brain = ''
|
||||
echo "🧠 Brain session starting..."
|
||||
echo " • wrap - end session with notes"
|
||||
echo " • inbox: <thought> - quick capture"
|
||||
echo ""
|
||||
cd ~/brain && claude
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
fzf = {
|
||||
@@ -348,8 +357,12 @@
|
||||
|
||||
git = {
|
||||
enable = true;
|
||||
userEmail = "petru@paler.net";
|
||||
userName = "Petru Paler";
|
||||
settings = {
|
||||
user = {
|
||||
email = "petru@paler.net";
|
||||
name = "Petru Paler";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
home-manager = {
|
||||
@@ -398,6 +411,12 @@
|
||||
|
||||
setw -g automatic-rename on
|
||||
set -g set-titles on
|
||||
|
||||
# first, unset update-environment[SSH_AUTH_SOCK] (idx 3), to prevent
|
||||
# the client overriding the global value
|
||||
set-option -g -u update-environment[3]
|
||||
# And set the global value to our static symlink'd path:
|
||||
set-environment -g SSH_AUTH_SOCK $HOME/.ssh/ssh_auth_sock
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
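Note: the tmux hunk above is the second half of the agent-forwarding trick whose first half is the ~/.ssh/rc file earlier in this diff; together they work roughly like this:

# 1. Each new SSH connection runs ~/.ssh/rc, which re-points the stable
#    symlink ~/.ssh/ssh_auth_sock at the freshly forwarded $SSH_AUTH_SOCK.
# 2. tmux unsets update-environment[3] (SSH_AUTH_SOCK) so attaching clients
#    stop rewriting the variable, and exports the symlink path globally
#    instead; long-lived panes keep a working agent across reconnects.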
@@ -1,7 +1,8 @@
{ pkgs, inputs, ... }:
{ pkgs, lib, inputs, ... }:
{
  imports = [
    ../../common/global
    ../../common/minimal-node.nix
    ./hardware.nix
    ./reverse-proxy.nix
  ];
@@ -11,4 +12,27 @@

  networking.hostName = "alo-cloud-1";
  services.tailscaleAutoconnect.authkey = "tskey-auth-kbdARC7CNTRL-pNQddmWV9q5C2sRV3WGep5ehjJ1qvcfD";

  services.tailscale = {
    enable = true;
    useRoutingFeatures = lib.mkForce "server"; # enables IPv4/IPv6 forwarding + loose rp_filter
    extraUpFlags = [ "--advertise-exit-node" ];
  };

  networking.nat = {
    enable = true;
    externalInterface = "enp1s0";
    internalInterfaces = [ "tailscale0" ];
  };

  networking.firewall = {
    enable = lib.mkForce true;
    allowedTCPPorts = [ 80 443 ]; # Public web traffic only
    allowedUDPPorts = [ 41641 ]; # Tailscale
    trustedInterfaces = [ "tailscale0" ]; # Full access via VPN
  };

  services.openssh = {
    settings.PasswordAuthentication = false; # Keys only
  };
}
@@ -1,7 +1,7 @@
{ pkgs, ... }:
{ pkgs, config, ... }:
{
  environment.systemPackages = [ pkgs.traefik ];
  environment.persistence."/persist".files = [ "/acme/acme.json" ];
  environment.persistence.${config.custom.impermanence.persistPath}.files = [ "/acme/acme.json" ];

  services.traefik = {
    enable = true;
77
hosts/beefy/default.nix
Normal file
@@ -0,0 +1,77 @@
{ pkgs, inputs, config, ... }:
{
  imports = [
    ../../common/encrypted-btrfs-layout.nix
    ../../common/global
    # Desktop environment is imported via flake.nix for desktop profile
    ../../common/cluster-member.nix # Consul + storage clients
    ../../common/cluster-tools.nix # Nomad CLI (no service)
    ../../common/docker.nix # Docker daemon
    ../../common/ham-radio.nix # Ham radio tools (FLEcli)
    ./hardware.nix
  ];

  diskLayout = {
    mainDiskDevice = "/dev/disk/by-id/nvme-CT1000P3PSSD8_25164F81F31D";
    #keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777650797-0:0";
    keyDiskDevice = "/dev/sda";
  };

  networking.hostName = "beefy";
  networking.cluster.primaryInterface = "enp1s0";
  services.tailscaleAutoconnect.authkey = "tskey-auth-k79UsDTw2v11CNTRL-oYqji35BE9c7CqM89Dzs9cBF14PmqYsi";

  # Console blanking after 5 minutes (for greeter display sleep)
  # NMI watchdog for hardlockup detection
  boot.kernelParams = [ "consoleblank=300" "nmi_watchdog=1" ];

  # Netconsole - stream kernel messages to zippy (192.168.1.2)
  # Must configure via configfs after network is up (interface doesn't exist at module load)
  boot.kernelModules = [ "netconsole" ];
  boot.kernel.sysctl."kernel.printk" = "8 4 1 7"; # Raise console_loglevel to send all messages
  systemd.services.netconsole-sender = {
    description = "Configure netconsole to send kernel messages to zippy";
    wantedBy = [ "multi-user.target" ];
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
    };
    script = ''
      TARGET=/sys/kernel/config/netconsole/target1
      mkdir -p $TARGET
      # Disable first if already enabled (can't modify params while enabled)
      if [ -f $TARGET/enabled ] && [ "$(cat $TARGET/enabled)" = "1" ]; then
        echo 0 > $TARGET/enabled
      fi
      echo enp1s0 > $TARGET/dev_name
      echo 192.168.1.2 > $TARGET/remote_ip
      echo 6666 > $TARGET/remote_port
      echo c0:3f:d5:62:55:bb > $TARGET/remote_mac
      echo 1 > $TARGET/enabled
    '';
  };

  # Kdump for kernel crash analysis
  boot.crashDump = {
    enable = true;
    reservedMemory = "256M";
  };

  # Lockup detectors - panic on detection so kdump captures state
  boot.kernel.sysctl = {
    # Enable all SysRq functions for debugging hangs
    "kernel.sysrq" = 1;
    # Panic on soft lockup (CPU not scheduling for >20s)
    "kernel.softlockup_panic" = 1;
    # Panic on hung tasks (blocked >120s)
    "kernel.hung_task_panic" = 1;
    "kernel.hung_task_timeout_secs" = 120;
  };

  # Persist crash dumps
  environment.persistence.${config.custom.impermanence.persistPath}.directories = [
    "/var/crash"
  ];
}
19
hosts/beefy/hardware.nix
Normal file
@@ -0,0 +1,19 @@
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];

  boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "usbhid" "usb_storage" "sd_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-amd" ];
  boot.extraModulePackages = [ ];

  nixpkgs.hostPlatform = "x86_64-linux";
  hardware.cpu.amd.updateMicrocode = true; # AMD microcode updates
}
BIN
hosts/beefy/key.bin
Normal file
Binary file not shown.
@@ -3,7 +3,9 @@
  imports = [
    ../../common/encrypted-btrfs-layout.nix
    ../../common/global
    ../../common/compute-node.nix
    ../../common/cluster-member.nix # Consul + storage clients
    ../../common/nomad-worker.nix # Nomad client (runs jobs)
    ../../common/nomad-server.nix # Consul + Nomad server mode
    ../../common/nfs-services-standby.nix # NFS standby for /data/services
    # To promote to NFS server (during failover):
    # 1. Follow procedure in docs/NFS_FAILOVER.md
@@ -21,8 +23,8 @@
  networking.hostName = "c1";
  services.tailscaleAutoconnect.authkey = "tskey-auth-k2nQ771YHM11CNTRL-YVpoumL2mgR6nLPG51vNhRpEKMDN7gLAi";

  # NFS standby configuration: accept replication from zippy
  nfsServicesStandby.replicationKeys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHyTKsMCbwCIlMcC/aopgz5Yfx/Q9QdlWC9jzMLgYFAV root@zippy-replication"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO5s73FSUiysHijWRGYCJY8lCtZkX1DGKAqp2671REDq root@sparky-replication"
  ];
}
@@ -3,16 +3,18 @@
  imports = [
    ../../common/encrypted-btrfs-layout.nix
    ../../common/global
    ../../common/compute-node.nix
    ../../common/cluster-member.nix # Consul + storage clients
    ../../common/nomad-worker.nix # Nomad client (runs jobs)
    ../../common/nomad-server.nix # Consul + Nomad server mode
    ./hardware.nix
  ];

  diskLayout = {
    mainDiskDevice = "/dev/disk/by-id/nvme-KINGSTON_SNV3S1000G_50026B7383365CD5";
    mainDiskDevice = "/dev/disk/by-id/nvme-KINGSTON_SNV3S1000G_50026B73841C1892";
    #keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777650675-0:0";
    keyDiskDevice = "/dev/sda";
  };

  networking.hostName = "c2";
  services.tailscaleAutoconnect.authkey = "tskey-auth-kBxfZWwkwN11CNTRL-NPkBePmSvdErPVZ5ECuefE12ZWPzpNou";
  services.tailscaleAutoconnect.authkey = "tskey-auth-kQ11fTmrzd11CNTRL-N4c2L3SAzUbvcAVhqCFWUbAEasJNTknd";
}
@@ -3,7 +3,9 @@
  imports = [
    ../../common/encrypted-btrfs-layout.nix
    ../../common/global
    ../../common/compute-node.nix
    ../../common/cluster-member.nix # Consul + storage clients
    ../../common/nomad-worker.nix # Nomad client (runs jobs)
    ../../common/nomad-server.nix # Consul + Nomad server mode
    ../../common/binary-cache-server.nix
    ./hardware.nix
  ];
@@ -8,6 +8,9 @@
  imports = [
    ../../common/encrypted-btrfs-layout.nix
    ../../common/global
    ../../common/workstation-node.nix # Dev tools (deploy-rs, docker, nix-ld)
    ../../common/cluster-member.nix # Consul + storage clients
    ../../common/cluster-tools.nix # Nomad CLI (no service)
    ./hardware.nix
  ];

@@ -18,8 +21,8 @@
  };

  networking.hostName = "chilly";
  networking.cluster.primaryInterface = "br0";
  services.tailscaleAutoconnect.authkey = "tskey-auth-kRXS9oPyPm11CNTRL-BE6YnbP9J6ZZuV9dHkX17ZMnm1JGdu93";
  services.consul.interface.advertise = lib.mkForce "br0";

  networking.useNetworkd = true;
  systemd.network.enable = true;
@@ -3,15 +3,24 @@
  imports = [
    ../../common/encrypted-btrfs-layout.nix
    ../../common/global
    ../../common/cluster-member.nix
    ../../common/nomad-worker.nix
    ../../common/nfs-services-server.nix
    # To move NFS server role to another host:
    # 1. Follow procedure in docs/NFS_FAILOVER.md
    # 2. Replace above line with: ../../common/nfs-services-standby.nix
    # 3. Add nfsServicesStandby.replicationKeys with the new server's public key
    ./hardware.nix
  ];

  diskLayout = {
    mainDiskDevice = "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNF0MA33640P";
    mainDiskDevice = "/dev/disk/by-id/nvme-KIOXIA-EXCERIA_with_Heatsink_SSD_84GF7016FA4S";
    #keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777660468-0:0";
    keyDiskDevice = "/dev/sda";
  };

  networking.hostName = "sparky";
  services.tailscaleAutoconnect.authkey = "tskey-auth-kFGr5T4rtT11CNTRL-Ls3wbQz5Nr2AUyzeLaC3s2eChNasyPdR";
  services.tailscaleAutoconnect.authkey = "tskey-auth-k6VC79UrzN11CNTRL-rvPmd4viyrQ261ifCrfTrQve7c2FesxrG";

  nfsServicesServer.standbys = [ "c1" ];
}
61
hosts/stinky/default.nix
Normal file
@@ -0,0 +1,61 @@
{
  lib,
  pkgs,
  config,
  ...
}:
{
  imports = [
    ../../common/global
    ../../common/impermanence-common.nix # Impermanence with custom root config (see hardware.nix)
    ../../common/resource-limits.nix
    ../../common/sshd.nix
    ../../common/user-ppetru.nix
    ../../common/wifi.nix
    # Note: No systemd-boot.nix - Raspberry Pi uses generic-extlinux-compatible (from sd-image module)
    ./hardware.nix
  ];

  hardware = {
    raspberry-pi."4".apply-overlays-dtmerge.enable = true;
    deviceTree = {
      enable = true;
      filter = "*rpi-4-*.dtb";
    };
  };

  networking.hostName = "stinky";

  # Configure impermanence for tmpfs root (filesystem config in hardware.nix)
  custom.impermanence.persistPath = "/nix/persist";

  # Tailscale configuration
  services.tailscaleAutoconnect.authkey = "tskey-auth-kZC8HX3wSw11CNTRL-7QvqxAphyzM7QeMUTKXv2Ng2RK4XCmg9A";

  # OctoPrint for 3D printer
  services.octoprint = {
    enable = true;
  };

  # Persist OctoPrint data
  environment.persistence.${config.custom.impermanence.persistPath}.directories = [
    "/var/lib/octoprint"
  ];

  # Pi HQ Camera support
  boot.kernelModules = [ "bcm2835-v4l2" ];

  environment.systemPackages = with pkgs; [
    libcamera
    libraspberrypi
    raspberrypi-eeprom
  ];

  # Firewall: Allow access to OctoPrint
  networking.firewall.allowedTCPPorts = [
    5000 # OctoPrint
  ];

  # Override global default (stinky is a new system with 25.05)
  system.stateVersion = lib.mkForce "25.05";
}
73
hosts/stinky/hardware.nix
Normal file
@@ -0,0 +1,73 @@
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:
{
  imports = [
    (modulesPath + "/installer/sd-card/sd-image-aarch64.nix")
  ];

  # Raspberry Pi 4 platform
  nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";

  # Disable ZFS (not needed, and broken with latest kernel)
  boot.supportedFilesystems.zfs = lib.mkForce false;

  # Boot configuration - provided by sd-image-aarch64.nix
  # (grub disabled, generic-extlinux-compatible enabled, U-Boot setup)

  # /boot/firmware is automatically configured by sd-image module
  # Device: /dev/disk/by-label/FIRMWARE (vfat)

  # tmpfs root with impermanence
  # Override sd-image module's ext4 root definition with mkForce
  fileSystems."/" = lib.mkForce {
    device = "none";
    fsType = "tmpfs";
    options = [
      "defaults"
      "size=2G"
      "mode=755"
    ];
  };

  # The SD partition contains /nix/store and /nix/persist at its root
  # Mount it at a hidden location, then bind mount its /nix to /nix
  fileSystems."/mnt/nixos-sd" = {
    device = "/dev/disk/by-label/NIXOS_SD";
    fsType = "ext4";
    options = [ "noatime" ];
    neededForBoot = true;
  };

  # Bind mount /nix from the SD partition
  fileSystems."/nix" = {
    device = "/mnt/nixos-sd/nix";
    fsType = "none";
    options = [ "bind" ];
    neededForBoot = true;
    depends = [ "/mnt/nixos-sd" ];
  };

  # No swap on SD card (wear concern)
  swapDevices = [ ];

  # SD image build configuration
  sdImage = {
    compressImage = true;

    # Populate root with directories
    populateRootCommands = ''
      mkdir -p ./files/boot
      ${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot

      # Create /nix/persist directory structure for impermanence
      mkdir -p ./files/nix/persist/var/lib/nixos
      mkdir -p ./files/nix/persist/home/ppetru
      mkdir -p ./files/nix/persist/etc
    '';
  };
}
@@ -3,16 +3,15 @@
  imports = [
    ../../common/encrypted-btrfs-layout.nix
    ../../common/global
    ../../common/compute-node.nix
    # ../../common/ethereum.nix
    ../../common/nfs-services-server.nix # NFS server for /data/services
    # To move NFS server role to another host:
    # 1. Follow procedure in docs/NFS_FAILOVER.md
    # 2. Replace above line with: ../../common/nfs-services-standby.nix
    # 3. Add nfsServicesStandby.replicationKeys with the new server's public key
    ../../common/cluster-member.nix # Consul + storage clients
    ../../common/nomad-worker.nix # Nomad client (runs jobs)
    ../../common/netconsole-receiver.nix
    ./hardware.nix
  ];

  # Receive kernel messages from beefy via netconsole
  services.netconsoleReceiver.enable = true;

  diskLayout = {
    mainDiskDevice = "/dev/disk/by-id/ata-KINGSTON_SKC600MS1024G_50026B7785AE0A92";
    #keyDiskDevice = "/dev/disk/by-id/usb-Intenso_Micro_Line_22080777660702-0:0";
@@ -21,7 +20,4 @@

  networking.hostName = "zippy";
  services.tailscaleAutoconnect.authkey = "tskey-auth-ktKyQ59f2p11CNTRL-ut8E71dLWPXsVtb92hevNX9RTjmk4owBf";

  # NFS server configuration: replicate to c1 as standby
  nfsServicesServer.standbys = [ "c1" ];
}
100
nix-runner/README.md
Normal file
@@ -0,0 +1,100 @@
# Nix Runner for Gitea Actions

Custom Docker image for running Nix builds in CI.

## What's Included

- **Nix** with flakes enabled (`experimental-features = nix-command flakes`)
- **Node.js 20** for JavaScript-based GitHub Actions
- **Tools**: git, curl, jq, skopeo, bash, coreutils
- **Binary caches**:
  - `c3.mule-stork.ts.net:8501` (local cache proxy)
  - `cache.nixos.org` (official)

## Usage

In your workflow:

```yaml
jobs:
  build:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
      - run: nix build .#myPackage
```

The `nix` label is configured in `services/act-runner.hcl`.

## Current Version

**Tag**: `v4`
**Image**: `gitea.v.paler.net/alo/nix-runner:v4`
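
To double-check what is actually published under the current tag, `skopeo inspect` can be pointed at the registry (illustrative; add `--authfile` if the registry requires credentials for pulls):

```bash
# Illustrative: show the manifest/metadata of the published runner image
skopeo inspect docker://gitea.v.paler.net/alo/nix-runner:v4
```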

## Updating the Runner

### 1. Edit `flake.nix`

Make your changes, then bump the tag:

```nix
tag = "v5"; # was v4
```

### 2. Build

```bash
cd nix-runner
nix build
```

### 3. Push to Registry

```bash
skopeo copy --dest-authfile ~/.docker/config.json \
  docker-archive:result \
  docker://gitea.v.paler.net/alo/nix-runner:v5
```

### 4. Update act-runner

Edit `services/act-runner.hcl`:

```hcl
GITEA_RUNNER_LABELS = "ubuntu-latest:docker://node:20-bookworm,nix:docker://gitea.v.paler.net/alo/nix-runner:v5"
```

### 5. Re-register Runner

```bash
sudo rm /data/services/act-runner/.runner
nomad run services/act-runner.hcl
```

The runner will re-register with the new labels.
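
To confirm the new registration took effect, one option (illustrative) is to check the Nomad job, then the runner list in the Gitea admin UI:

```bash
# Illustrative check: the act-runner allocation should be running again
nomad job status act-runner
```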

## Configuration

The image uses the `NIX_CONFIG` environment variable for Nix settings:

```
experimental-features = nix-command flakes
sandbox = false
build-users-group =
substituters = http://c3.mule-stork.ts.net:8501 https://cache.nixos.org
trusted-public-keys = cache.nixos.org-1:... c3:...
```

## Troubleshooting

### Build fails with `build-users-group` error

The image runs as root without the nixbld group. This is handled by `build-users-group =` in `NIX_CONFIG`.

### Can't fetch from cache

Check that the runner container can reach `c3.mule-stork.ts.net:8501` (Tailscale network).
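
A quick reachability test (illustrative; assumes the cache proxy serves the standard `/nix-cache-info` endpoint that Nix binary caches expose):

```bash
# Illustrative: a reachable Nix binary cache responds on /nix-cache-info
curl -sf http://c3.mule-stork.ts.net:8501/nix-cache-info
```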

### Missing tool

Add it to `paths` in `flake.nix`, then rebuild and push a new version.
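
For example, assuming the image contents are assembled from a `paths` list roughly like the one below (the actual attribute names and structure in `flake.nix` may differ), adding `ripgrep` would look like:

```nix
# Hypothetical excerpt of nix-runner/flake.nix; the real structure may differ
paths = with pkgs; [
  nix
  nodejs_20
  git
  curl
  jq
  skopeo
  bash
  coreutils
  ripgrep # newly added tool
];
```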
61
nix-runner/flake.lock
generated
Normal file
@@ -0,0 +1,61 @@
{
  "nodes": {
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1731533236,
        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1767379071,
        "narHash": "sha256-EgE0pxsrW9jp9YFMkHL9JMXxcqi/OoumPJYwf+Okucw=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "fb7944c166a3b630f177938e478f0378e64ce108",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
Some files were not shown because too many files have changed in this diff.