Compare commits

..

4 commits

Author SHA1 Message Date
711dc677ec
WIP/backup: Deployment batch antigravity 2026-03-17 18:18:19 +01:00
5a031b48ed
fix(vaultwarden): Update image - fix webui not loading 2026-03-05 20:26:33 +01:00
47245b2b96
flake.lock: Update
Flake lock file updates:

• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/544961dfcce86422ba200ed9a0b00dd4b1486ec5?narHash=sha256-EVAqOteLBFmd7pKkb0%2BFIUyzTF61VKi7YmvP1tw4nEw%3D' (2025-10-15)
  → 'github:NixOS/nixpkgs/80bdc1e5ce51f56b19791b52b2901187931f5353?narHash=sha256-QKyJ0QGWBn6r0invrMAK8dmJoBYWoOWy7lN%2BUHzW1jc%3D' (2026-03-04)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/ab8d56e85b8be14cff9d93735951e30c3e86a437?narHash=sha256-8mN3kqyqa2PKY0wwZ2UmMEYMcxvNTwLaOrrDsw6Qi4E%3D' (2025-10-13)
  → 'github:Mic92/sops-nix/1d9b98a29a45abe9c4d3174bd36de9f28755e3ff?narHash=sha256-hmIvE/slLKEFKNEJz27IZ8BKlAaZDcjIHmkZ7GCEjfw%3D' (2026-03-02)
2026-03-05 20:17:50 +01:00
c04bce06b6
feat: Test Action
Including commits:
* chore: Disable test workflow
* Determine hosts
* Build each host
* Add NixOS to build step as well
* More specific hostnames
* fix json elements
* try different way
* Debug matrix
* run on push
* use var directly
* fix mappings
* fix mappings
* add toolcache
* Change names and ordere
* Debugging
* Debugging extra step
* Debugging needs outputs
* Debugging needs outputs hosts
* Preserve quotes
* printf escaped
* Using EOF
* test
* escape?
* without json parse
* hardcoding
* change build command
* Testing
2026-03-05 20:09:05 +01:00
33 changed files with 618 additions and 359 deletions

40
.github/workflows/build.yml vendored Normal file
View file

@ -0,0 +1,40 @@
name: Build
on:
  push:
    branches:
      - main
      - 'test-*'
  pull_request:
jobs:
  # Discover every host defined in the flake so each one can be built
  # as its own matrix job.
  get-hosts:
    runs-on: ubuntu-latest
    container: catthehacker/ubuntu:act-24.04
    outputs:
      hosts: ${{ steps.set-hosts.outputs.hosts }}
    steps:
      - uses: actions/checkout@v4
      - name: Install Nix
        uses: cachix/install-nix-action@v27
      - id: set-hosts
        run: |
          # Emit the attribute names of nixosConfigurations as a JSON array.
          HOSTS=$(nix eval .#nixosConfigurations --apply "builtins.attrNames" --json)
          echo "hosts=$HOSTS" >> $GITHUB_OUTPUT
  # Build the system closure of every discovered host in parallel.
  build:
    needs: get-hosts
    runs-on: ubuntu-latest
    container: catthehacker/ubuntu:act-24.04
    strategy:
      fail-fast: false
      matrix:
        host: ${{ fromJson(needs.get-hosts.outputs.hosts) }}
    steps:
      - uses: actions/checkout@v4
      - name: Install Nix
        uses: cachix/install-nix-action@v27
      - name: Build NixOS configuration
        run: nix build .#nixosConfigurations.${{ matrix.host }}.config.system.build.toplevel

24
.github/workflows/check.yml vendored Normal file
View file

@ -0,0 +1,24 @@
name: Check
on:
  push:
    branches:
      - '**'
  pull_request:
jobs:
  # Run `nix flake check` on every push and pull request.
  check:
    runs-on: ubuntu-latest
    container: catthehacker/ubuntu:act-24.04
    steps:
      - uses: actions/checkout@v4
      - name: Install Nix
        uses: cachix/install-nix-action@v27
        with:
          extra_nix_config: |
            experimental-features = nix-command flakes
            access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
      - name: Flake check
        run: nix flake check

81
.github/workflows/deploy.yml vendored Normal file
View file

@ -0,0 +1,81 @@
name: Deploy
on:
  push:
    branches:
      - main
      - 'test-*'
  workflow_dispatch:
    inputs:
      mode:
        description: 'Activation mode (switch, boot, test)'
        default: 'switch'
        required: true
jobs:
  deploy:
    runs-on: ubuntu-latest
    container: catthehacker/ubuntu:act-24.04
    steps:
      - uses: actions/checkout@v4
      - name: Install Nix
        uses: cachix/install-nix-action@v27
        with:
          extra_nix_config: |
            experimental-features = nix-command flakes
      - name: Setup SSH
        run: |
          mkdir -p ~/.ssh
          echo "${{ secrets.DEPLOY_SSH_KEY }}" > ~/.ssh/id_ed25519
          chmod 600 ~/.ssh/id_ed25519
          # FIX: ssh-keyscan does not accept CIDR notation, so the previous
          # `ssh-keyscan -H 192.168.0.0/24` invocation always failed (masked
          # by `|| true`) and `StrictHostKeyChecking no` then disabled host
          # verification entirely. Use trust-on-first-use for the homelab
          # subnet instead: unknown keys are recorded on first contact, but a
          # CHANGED key still aborts the deployment.
          printf 'Host 192.168.0.*\n  StrictHostKeyChecking accept-new\n' >> ~/.ssh/config
      - name: Verify Commit Signature
        if: github.event.sender.login != 'renovate[bot]'
        run: |
          # TODO Hugo: Export your public GPG/SSH signing keys to a runner secret named 'TRUSTED_SIGNERS'.
          # For GPG: gpg --export --armor <id> | base64 -w0
          if [ -z "${{ secrets.TRUSTED_SIGNERS }}" ]; then
            echo "::error::TRUSTED_SIGNERS secret is missing. Deployment aborted for safety."
            exit 1
          fi
          # This step expects a base64-encoded keyring in the TRUSTED_SIGNERS
          # secret; git verifies the current commit's signature against it.
          echo "${{ secrets.TRUSTED_SIGNERS }}" | base64 -d > /tmp/trusted_keys.gpg
          gpg --import /tmp/trusted_keys.gpg
          if ! git verify-commit HEAD; then
            echo "::error::Commit signature verification failed. Only signed commits from trusted maintainers can be deployed."
            exit 1
          fi
          echo "Commit signature verified successfully."
      - name: Install deploy-rs
        run: nix profile install github:serokell/deploy-rs
      - name: Deploy to hosts
        # FIX: gate the branch-based deployment on push events; manual runs
        # are handled by the 'Manual Deploy' step below.
        if: github.event_name == 'push'
        run: |
          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            # Main branch: persistent deployment of every deployable node.
            deploy . --skip-checks --targets $(deploy . --list | grep '.system$' | tr '\n' ' ')
          elif [[ "${{ github.ref }}" == "refs/heads/test-"* ]]; then
            # Test branch (named test-<hostname>): activate the host's 'test'
            # profile without touching the bootloader; a reboot reverts to the
            # previous generation.
            HOSTNAME="${GITHUB_REF#refs/heads/test-}"
            deploy .#${HOSTNAME}.test --skip-checks
          fi
      - name: Manual Deploy
        if: github.event_name == 'workflow_dispatch'
        run: |
          # FIX: honor the 'mode' input, which was previously declared but ignored.
          MODE="${{ github.event.inputs.mode }}"
          case "$MODE" in
            switch)
              deploy . --skip-checks
              ;;
            boot)
              # Update bootloader entries without activating immediately.
              deploy . --skip-checks --boot
              ;;
            test)
              # Closest deploy-rs equivalent of a non-persistent activation
              # across all nodes — TODO confirm this matches the intended
              # 'test' semantics for this homelab.
              deploy . --skip-checks --dry-activate
              ;;
            *)
              echo "::error::Unknown activation mode '$MODE' (expected switch, boot or test)."
              exit 1
              ;;
          esac

17
.github/workflows/test.yml vendored Normal file
View file

@ -0,0 +1,17 @@
name: "Test"
on:
  pull_request:
  push:
jobs:
  tests:
    # Workflow is intentionally disabled; flip to `true` to re-enable.
    if: false
    runs-on: ubuntu-latest
    container:
      image: catthehacker/ubuntu:act-latest
    steps:
      - uses: actions/checkout@v5
      - uses: https://github.com/cachix/install-nix-action@v31
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - name: "My custom step"
        run: nix run nixpkgs#hello

1
.gitignore vendored
View file

@ -1 +1,2 @@
.idea
result

64
README.md Normal file
View file

@ -0,0 +1,64 @@
# Bos55 NixOS Config
Automated CI/CD deployment for NixOS homelab using `deploy-rs`.
## Repository Structure
- `hosts/`: Host-specific configurations.
- `modules/`: Shared NixOS modules.
- `users/`: User definitions (including the `deploy` user).
- `secrets/`: Encrypted secrets via `sops-nix`.
## Deployment Workflow
### Prerequisites
- SSH access to the `deploy` user on target hosts.
- `deploy-rs` installed locally (`nix profile install github:serokell/deploy-rs`).
### Deployment Modes
1. **Production Deployment (main branch):**
Triggered on push to `main`. Automatically builds and switches all hosts. The bootloader is updated.
Manual: `deploy .`
2. **Test Deployment (test-<hostname> branch):**
Triggered on push to `test-<hostname>`. Builds and activates the configuration on the specific host **without** updating the bootloader. Reboots will revert to the previous generation.
Manual: `deploy .#<hostname>.test`
3. **Kernel Upgrades / Maintenance:**
Use `deploy .#<hostname>.system --boot` to update the bootloader without immediate activation, followed by a manual reboot.
## Local Development
### 1. Developer Shell
This repository includes a standardized development environment containing all necessary tools (`deploy-rs`, `sops`, `age`, etc.).
```bash
nix develop
# or if using direnv
direnv allow
```
### 2. Build a host VM
You can build a QEMU VM for any host configuration to test changes locally:
```bash
nix build .#nixosConfigurations.<hostname>.config.system.build.vm
./result/bin/run-<hostname>-vm
```
> [!WARNING]
> **Network Conflict**: Default VMs use user-mode networking (NAT) which is safe. However, if you configure the VM to use bridge networking, it will attempt to use the static IP defined in `hostIp`. Ensure you do not have a physical host with that IP active on the same bridge to avoid network interference.
### 3. Run Integration Tests
Run the automated test suite:
```bash
nix-build test/vm-test.nix
```
### 4. Test CI Workflows Locally
Use `act` to test the GitHub Actions workflows:
```bash
act -W .github/workflows/check.yml
```
## Security
See [SECURITY.md](SECURITY.md) for details on the trust model and secret management.

93
SECURITY.md Normal file
View file

@ -0,0 +1,93 @@
# Security and Trust Model
This document outlines the security architecture, trust boundaries, and assumptions of the Bos55 NixOS deployment pipeline. This model is designed to support a multi-member infrastructure team and remains secure even if the repository is published publicly.
## Trust Zones
The system is partitioned into three distinct trust zones, each with specific controls to prevent lateral movement and privilege escalation.
### 🔴 Zone 1: Trusted Maintainers (Source of Truth)
* **Actors:** Infrastructure Team / Maintainers.
* **Capabilities:**
* Full access to the Git repository.
* Ownership of `sops-nix` master keys (GPG or Age).
* Direct root access to NixOS hosts via personal SSH keys for emergency maintenance.
* **Trust:** Root of trust. All changes must originate from or be approved by a Trusted Maintainer.
* **Security Controls:**
* **Signed Commits:** All contributions must be cryptographically signed by a trusted GPG/SSH key to be eligible for deployment.
* **MFA:** Hardware-based multi-factor authentication for repository access.
* **Metadata Redaction:** Sensitive identifiers like SSH `authorizedKeys` are stored in `sops-nix`. This prevents **infrastructure fingerprinting**, where an attacker could link your public keys to your personal identities or other projects.
### 🟡 Zone 2: CI/CD Pipeline (Automation Layer)
* **Actor:** GitHub Actions / Forgejo Runners.
* **Capabilities:**
* Builds Nix derivations from the repository.
* Access to the `DEPLOY_SSH_KEY` (allowing SSH access to the `deploy` user on target hosts).
* **Trusted Signers:** The public keys for verifying signatures are stored as a **Runner Secret** (`TRUSTED_SIGNERS`). This hides the identities of the infrastructure team even in a public repository.
* **NO ACCESS** to `sops-nix` decryption keys. Secrets remain encrypted during the build.
* **Security Controls:**
* **Signature Enforcement:** The `deploy.yml` workflow verifies the cryptographic signature of every maintainer commit. Deployment is aborted if the signature is missing or untrusted.
* **Sandboxing:** Runners execute in ephemeral, isolated containers.
* **Branch Protection:** Deployments to production (`main`) require approved Pull Requests.
* **Fork Protection:** CI workflows (and secrets) are explicitly disabled for forks.
### 🟢 Zone 3: Target NixOS Hosts (Runtime)
* **Actor:** Production, Testing, and Service nodes.
* **Capabilities:** Decrypt secrets locally using host-specific `age` keys.
* **Trust:** Consumers of builds. They trust Zone 2 only for the pushing of store paths and triggering activation scripts.
* **Security Controls:**
* **Restricted `deploy` User:** The SSH user for automation is non-root. Sudo access is strictly policed via `sudoers` rules to allow only `nix-env` and `switch-to-configuration`.
* **Immutable Store:** Building on Nix ensures that the system state is derived from a cryptographically hashed store, preventing unauthorized local modifications from persisting across reboots.
---
## Security Assumptions & Policies
### 1. Public Repository Safety
The repository is designed to be safe for public viewing. No unencrypted secrets should ever be committed. The deployment pipeline is protected against "malicious contributors" via:
- **Mandatory PR Reviews:** No code can reach the `main` branch without peer review.
- **Secret Scoping:** Deployment keys are only available to authorized runs on protected branches.
### 2. Supply Chain & Dependencies
- **Flake Lockfiles:** All dependencies (Nixpkgs, `deploy-rs`, etc.) are pinned to specific git revisions.
- **Renovate Bot:** Automated version upgrades allow for consistent tracking of upstream changes, though they require manual review or successful status checks for minor/patch versions.
### 3. Signed Commit Enforcement
To prevent "force-push" attacks or runner compromises from injecting malicious code into the history, the pipeline should be configured to only deploy commits signed by a known "Trusted Maintainer" key. This ensures that even if a git account is compromised, the attacker cannot deploy code without the physical/cryptographic signing key.
---
## Trust Boundary Diagram
```mermaid
graph TD
subgraph "Zone 1: Trusted Workstations"
DEV["Maintainers (Team)"]
SOPS_KEYS["Master SOPS Keys"]
SIGN_KEYS["Signing Keys (GPG/SSH)"]
end
subgraph "Zone 2: CI/CD Runner (Sandboxed)"
CI["Automated Runner"]
SSH_KEY["Deploy SSH Key"]
end
subgraph "Zone 3: NixOS Target Hosts"
HOST["Target Host"]
HOST_AGE["Host Age Key"]
end
DEV -- "Signed Push / PR" --> CI
CI -- "Push Store Paths & Activate" --> HOST
HOST_AGE -- "Local Decrypt" --> HOST
style DEV fill:#f96,stroke:#333
style CI fill:#ff9,stroke:#333
style HOST fill:#9f9,stroke:#333
```
## Security Best Practices for Maintainers
1. **Keep Master Keys Offline:** Never store `sops-nix` master keys on the CI runner or public servers.
2. **Audit Runner Logs:** Periodically review CI execution logs for unexpected behavior.
3. **Rotate Deployment Keys:** Rotate the `DEPLOY_SSH_KEY` if maintainer membership changes significantly.

88
flake.lock generated
View file

@ -1,8 +1,46 @@
{
"nodes": {
"deploy-rs": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"nixpkgs"
],
"utils": "utils"
},
"locked": {
"lastModified": 1770019181,
"narHash": "sha256-hwsYgDnby50JNVpTRYlF3UR/Rrpt01OrxVuryF40CFY=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "77c906c0ba56aabdbc72041bf9111b565cdd6171",
"type": "github"
},
"original": {
"owner": "serokell",
"repo": "deploy-rs",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
"systems": "systems_2"
},
"locked": {
"lastModified": 1731533236,
@ -20,11 +58,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1760524057,
"narHash": "sha256-EVAqOteLBFmd7pKkb0+FIUyzTF61VKi7YmvP1tw4nEw=",
"lastModified": 1772624091,
"narHash": "sha256-QKyJ0QGWBn6r0invrMAK8dmJoBYWoOWy7lN+UHzW1jc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "544961dfcce86422ba200ed9a0b00dd4b1486ec5",
"rev": "80bdc1e5ce51f56b19791b52b2901187931f5353",
"type": "github"
},
"original": {
@ -35,10 +73,11 @@
},
"root": {
"inputs": {
"deploy-rs": "deploy-rs",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"sops-nix": "sops-nix",
"utils": "utils"
"utils": "utils_2"
}
},
"sops-nix": {
@ -48,11 +87,11 @@
]
},
"locked": {
"lastModified": 1760393368,
"narHash": "sha256-8mN3kqyqa2PKY0wwZ2UmMEYMcxvNTwLaOrrDsw6Qi4E=",
"lastModified": 1772495394,
"narHash": "sha256-hmIvE/slLKEFKNEJz27IZ8BKlAaZDcjIHmkZ7GCEjfw=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "ab8d56e85b8be14cff9d93735951e30c3e86a437",
"rev": "1d9b98a29a45abe9c4d3174bd36de9f28755e3ff",
"type": "github"
},
"original": {
@ -76,7 +115,40 @@
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"utils_2": {
"inputs": {
"flake-utils": [
"flake-utils"

View file

@ -13,52 +13,81 @@
url = "github:gytis-ivaskevicius/flake-utils-plus";
inputs.flake-utils.follows = "flake-utils";
};
deploy-rs = {
url = "github:serokell/deploy-rs";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = inputs@{
self, nixpkgs,
flake-utils, sops-nix, utils,
flake-utils, sops-nix, utils, deploy-rs,
...
}:
let
system = utils.lib.system.x86_64-linux;
system = "x86_64-linux";
lib = nixpkgs.lib;
in
utils.lib.mkFlake {
inherit self inputs;
utils.lib.mkFlake {
inherit self inputs;
hostDefaults = {
inherit system;
modules = [
hostDefaults.modules = [
./modules
./users
sops-nix.nixosModules.sops
({ self, ... }: {
sops.defaultSopsFile = "${self}/secrets/secrets.yaml";
sops.age.keyFile = "/var/lib/sops-nix/key.txt";
})
];
hosts = {
# Infrastructure
Niko.modules = [ ./hosts/Niko ];
Ingress.modules = [ ./hosts/Ingress ];
Gitea.modules = [ ./hosts/Gitea ];
Vaultwarden.modules = [ ./hosts/Vaultwarden ];
# Production
Binnenpost.modules = [ ./hosts/Binnenpost ];
Production.modules = [ ./hosts/Production ];
ProductionGPU.modules = [ ./hosts/ProductionGPU ];
ProductionArr.modules = [ ./hosts/ProductionArr ];
ACE.modules = [ ./hosts/ACE ];
# Lab
Template.modules = [ ./hosts/Template ];
Development.modules = [ ./hosts/Development ];
Testing.modules = [ ./hosts/Testing ];
};
deploy.nodes = let
pkg = deploy-rs.lib.${system};
isDeployable = nixos: (nixos.config.homelab.users.deploy.enable or false) && (nixos.config.homelab.networking.hostIp != null);
in
builtins.mapAttrs (_: nixos: {
hostname = nixos.config.homelab.networking.hostIp;
sshUser = "deploy";
user = "root";
profiles.system.path = pkg.activate.nixos nixos;
profiles.test.path = pkg.activate.custom nixos.config.system.build.toplevel ''
$PROFILE/bin/switch-to-configuration test
'';
}) (lib.filterAttrs (_: isDeployable) self.nixosConfigurations);
checks = builtins.mapAttrs (_: lib: lib.deployChecks self.deploy) deploy-rs.lib;
outputsBuilder = channels: {
formatter = channels.nixpkgs.alejandra;
devShells.default = channels.nixpkgs.mkShell {
name = "homelab-dev";
buildInputs = [
deploy-rs.packages.${system}.deploy-rs
channels.nixpkgs.sops
channels.nixpkgs.age
];
shellHook = "echo '🛡 Homelab Development Shell Loaded'";
};
};
};
hosts = {
# Physical hosts
Niko.modules = [ ./hosts/Niko ];
# Virtual machines
# Single-service
Ingress.modules = [ ./hosts/Ingress ];
Gitea.modules = [ ./hosts/Gitea ];
Vaultwarden.modules = [ ./hosts/Vaultwarden ];
# Production multi-service
Binnenpost.modules = [ ./hosts/Binnenpost ];
Production.modules = [ ./hosts/Production ];
ProductionGPU.modules = [ ./hosts/ProductionGPU ];
ProductionArr.modules = [ ./hosts/ProductionArr ];
ACE.modules = [ ./hosts/ACE ];
# Others
Template.modules = [ ./hosts/Template ];
Development.modules = [ ./hosts/Development ];
Testing.modules = [ ./hosts/Testing ];
};
};
}

View file

@ -1,10 +1,12 @@
{ pkgs, ... }:
{ config, pkgs, ... }:
{
config = {
homelab = {
networking.hostIp = "192.168.0.41";
services.actions.enable = true;
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
@ -24,7 +26,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.41";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -1,4 +1,4 @@
{ pkgs, ... }:
{ config, pkgs, ... }:
{
config = {
@ -13,12 +13,14 @@
};
homelab = {
networking.hostIp = "192.168.0.89";
apps = {
speedtest.enable = true;
technitiumDNS.enable = true;
traefik.enable = true;
};
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
@ -43,7 +45,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.89";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -3,6 +3,7 @@
{
config = {
homelab = {
networking.hostIp = "192.168.0.91";
apps = {
bind9.enable = true;
homepage = {
@ -11,9 +12,9 @@
};
traefik.enable = true;
plex.enable = true;
solidtime.enable = true;
};
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
@ -37,7 +38,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.91";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];
@ -60,7 +61,8 @@
environment = {
# NOTE Required
# The email address used when setting up the initial administrator account to login to pgAdmin.
PGADMIN_DEFAULT_EMAIL = "kmtl.hugo+pgadmin@gmail.com";
# TODO Hugo: Populate 'pgadmin_email' in sops.
PGADMIN_DEFAULT_EMAIL = config.sops.placeholder.pgadmin_email or "pgadmin-admin@example.com";
# NOTE Required
# The password used when setting up the initial administrator account to login to pgAdmin.
PGADMIN_DEFAULT_PASSWORD = "ChangeMe";

View file

@ -3,9 +3,12 @@
{
config = {
homelab = {
networking.hostIp = "192.168.0.24";
apps.gitea.enable = true;
virtualisation.guest.enable = true;
users.deploy.enable = true;
users.admin = {
enable = true;
authorizedKeys = [
@ -28,7 +31,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.24";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -2,7 +2,11 @@
{
config = {
homelab.virtualisation.guest.enable = true;
homelab = {
networking.hostIp = "192.168.0.10";
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
hostName = "Ingress";
@ -19,8 +23,8 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.10";
prefixLength = 24;
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];
};
@ -39,6 +43,7 @@ prefixLength = 24;
};
};
security.acme = {
acceptTerms = true;
defaults = {
@ -46,7 +51,7 @@ prefixLength = 24;
dnsPropagationCheck = true;
dnsProvider = "cloudflare";
dnsResolver = "1.1.1.1:53";
email = "tibo.depeuter@telenet.be";
email = config.sops.placeholder.acme_email or "acme-email@example.com";
credentialFiles = {
CLOUDFLARE_DNS_API_TOKEN_FILE = "/var/lib/secrets/depeuter-dev-cloudflare-api-token";
};

View file

@ -165,7 +165,7 @@ providers:
# Certificates
"--certificatesresolvers.letsencrypt.acme.dnschallenge=true"
"--certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare"
"--certificatesresolvers.letsencrypt.acme.email=tibo.depeuter@telenet.be"
"--certificatesresolvers.letsencrypt.acme.email=${config.sops.placeholder.acme_email or "acme-email@example.com"}"
"--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
# Additional routes
@ -176,8 +176,8 @@ providers:
# "8080:8080/tcp" # The Web UI (enabled by --api.insecure=true)
];
environment = {
# TODO Hide this!
"CLOUDFLARE_DNS_API_TOKEN" = "6Vz64Op_a6Ls1ljGeBxFoOVfQ-yB-svRbf6OyPv2";
# TODO Hugo: Populate 'cloudflare_dns_token' in sops.
"CLOUDFLARE_DNS_API_TOKEN" = config.sops.placeholder.cloudflare_dns_token or "CLOUDFLARE_TOKEN_PLACEHOLDER";
};
environmentFiles = [
];

View file

@ -7,6 +7,7 @@
];
homelab = {
networking.hostIp = "192.168.0.11";
apps = {
technitiumDNS.enable = true;
traefik.enable = true;

View file

@ -3,11 +3,13 @@
{
config = {
homelab = {
networking.hostIp = "192.168.0.31";
apps = {
calibre.enable = true;
traefik.enable = true;
};
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
@ -31,7 +33,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.31";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -3,11 +3,13 @@
{
config = {
homelab = {
networking.hostIp = "192.168.0.33";
apps = {
arr.enable = true;
traefik.enable = true;
};
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
@ -31,7 +33,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.33";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -3,8 +3,10 @@
{
config = {
homelab = {
networking.hostIp = "192.168.0.94";
apps.jellyfin.enable = true;
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
@ -28,7 +30,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.94";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -3,11 +3,13 @@
{
config = {
homelab = {
networking.hostIp = "192.168.0.92";
apps = {
freshrss.enable = true;
traefik.enable = true;
};
virtualisation.guest.enable = true;
users.deploy.enable = true;
};
networking = {
@ -32,7 +34,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.92";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -3,6 +3,7 @@
{
config = {
homelab = {
networking.hostIp = "192.168.0.22";
apps.vaultwarden = {
enable = true;
domain = "https://vault.depeuter.dev";
@ -10,11 +11,15 @@
};
virtualisation.guest.enable = true;
users.admin = {
enable = true;
authorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnihoyozOCnm6T9OzL2xoMeMZckBYR2w43us68ABA93"
];
users = {
deploy.enable = true;
admin = {
enable = true;
authorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnihoyozOCnm6T9OzL2xoMeMZckBYR2w43us68ABA93"
];
};
};
};
@ -32,7 +37,7 @@
interfaces.ens18 = {
ipv4.addresses = [
{
address = "192.168.0.22";
address = config.homelab.networking.hostIp;
prefixLength = 24;
}
];

View file

@ -9,7 +9,6 @@
./homepage
./jellyfin
./plex
./solidtime
./speedtest
./technitium-dns
./traefik

View file

@ -496,7 +496,8 @@ in {
#FORGEJO__mailer__CLIENT_KEY_FILE = "custom/mailer/key.pem";
# Mail from address, RFC 5322. This can be just an email address, or the
# `"Name" <email@example.com>` format.
FORGEJO__mailer__FROM = ''"${title}" <git@depeuter.dev>'';
# TODO Hugo: Populate 'gitea_mailer_from' in sops.
FORGEJO__mailer__FROM = config.sops.placeholder.gitea_mailer_from or "git@example.com";
# Sometimes it is helpful to use a different address on the envelope. Set this to use
# ENVELOPE_FROM as the from on the envelope. Set to `<>` to send an empty address.
#FORGEJO__mailer__ENVELOPE_FROM = "";

View file

@ -1,278 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.homelab.apps.solidtime;
networkName = "solidtime";
internalNetworkName = "solidtime-internal";
proxyNet = config.homelab.apps.traefiik.sharedNetworkName;
user = "1000:1000";
# dbExternalPort = ...;
dbInternalPort = 5432;
gotenbergPort = 3000;
inherit (config.virtualisation.oci-containers) containers;
solidtimeImageName = "solidtime/solidtime";
version = "0.10.0";
solidtimeImage = "${solidtimeImageName}:${version}";
solidtimeImageFile = pkgs.dockerTools.pullImage {
imageName = solidtimeImageName;
finalImageTag = version;
imageDigest = "sha256:817d3a366ecc39f0473d7154372afa82dd4e6e50c66d70be45804892c8421cbb";
sha256 = "sha256-h5aCKaquUF/EVsOHaLOHrn1HAoXZYPhAbJ+e4cmjSA8=";
};
volumes = [
"solidtime-storage:/var/www/html/storage"
"solidtime-logs:/var/www/html/storage/logs"
"solidtime-app:/var/www/html/storage/app"
];
# laravel.env
laravelEnv = {
APP_NAME = "Solidtime";
VITE_APP_NAME = laravelEnv.APP_NAME;
APP_ENV = "production";
APP_DEBUG = "false";
APP_URL = "http://localhost:${toString cfg.port}";
APP_FORCE_HTTPS = "false";
APP_ENABLE_REGISTRATION = "false";
TRUSTED_PROXIES = "0.0.0.0/0,2000:0:0:0:0:0:0:0/3";
# Logging
LOG_CHANNEL = "stderr_daily";
LOG_LEVEL = "debug";
# Database
DB_CONNECTION = "pgsql";
DB_HOST = containers.solidtimeDb.hostname;
DB_PORT = toString dbInternalPort;
DB_SSL_MODE = "require";
DB_DATABASE = "solidtime";
DB_USERNAME = "solidtime";
DB_PASSWORD = "ChangeMe";
# Mail
#MAIL_MAILER = "smtp";
#MAIL_HOST = "smtp.gmail.com";
#MAIL_PORT = "465";
#MAIL_ENCRYPTION = "tls";
#MAIL_FROM_ADDRESS = "no-reply@time.depeuter.dev";
MAIL_FROM_NAME = laravelEnv.APP_NAME;
#MAIL_USERNAME = "kmtl.hugo@gmail.com";
#MAIL_PASSWORD = "fhfxoequhhqidrhd";
# Queue
QUEUE_CONNECTION = "database";
# File storage
FILESYSTEM_DISK = "local";
PUBLIC_FILESYSTEM_DISK = "public";
# Services
GOTENBERG_URL = "http://${containers.solidtimeGotenberg.hostname}:${toString gotenbergPort}";
};
in {
options.homelab.apps.solidtime = {
enable = lib.mkEnableOption "Solidtime time tracker using Docker";
port = lib.mkOption {
type = lib.types.int;
default = 8000;
description = "Solidtime WebUI port";
};
exposePort = lib.mkEnableOption "Expose Soldtime port";
};
config = lib.mkIf cfg.enable {
homelab.virtualisation.containers.enable = true;
# Make sure the Docker network exists.
systemd.services = {
"docker-${networkName}-create-network" = {
description = "Create Docker network for ${networkName}";
requiredBy = [
"${containers.solidtime.serviceName}.service"
];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
if ! ${pkgs.docker}/bin/docker network ls | grep -q ${networkName}; then
${pkgs.docker}/bin/docker network create ${networkName}
fi
'';
};
"docker-${internalNetworkName}-create-network" = {
description = "Create Docker network for ${internalNetworkName}";
requiredBy = [
"${containers.solidtime.serviceName}.service"
"${containers.solidtimeScheduler.serviceName}.service"
"${containers.solidtimeQueue.serviceName}.service"
"${containers.solidtimeDb.serviceName}.service"
"${containers.solidtimeGotenberg.serviceName}.service"
];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
if ! ${pkgs.docker}/bin/docker network ls | grep -q ${internalNetworkName}; then
${pkgs.docker}/bin/docker network create ${internalNetworkName}
fi
'';
};
};
virtualisation.oci-containers.containers = {
solidtime = {
hostname = "solidtime";
image = solidtimeImage;
imageFile = solidtimeImageFile;
inherit user;
autoStart = true;
dependsOn = [
"solidtimeDb"
];
ports = [
# Open ports if you don't use Traefik
"${toString cfg.port}:8000"
];
networks = [
networkName
internalNetworkName
];
extraOptions = [
# Healthecks
# test: [ "CMD", "curl", "--fail", "http://localhost:8000/health-check/up" ]
''--health-cmd=curl --fail http://localhost:8000/health-check/up''
];
inherit volumes;
labels = {
"traefik.enable" = "true";
"traefik.http.routers.solidtime.rule" = "Host(`time.${config.networking.hostName}.depeuter.dev`)";
"traefik.http.services.solidtime.loadbalancer.server.port" = toString cfg.port;
};
environmentFiles = [
"/home/admin/.solidtime.env"
];
environment = laravelEnv // {
CONTAINER_MODE = "http";
};
};
solidtimeScheduler = {
hostname = "scheduler";
image = solidtimeImage;
imageFile = solidtimeImageFile;
inherit user;
autoStart = true;
dependsOn = [
"solidtimeDb"
];
networks = [
internalNetworkName
];
extraOptions = [
# Healthchecks
# test: [ "CMD", "healthcheck" ]
''--health-cmd="healthcheck"''
];
inherit volumes;
environmentFiles = [
"/home/admin/.solidtime.env"
];
environment = laravelEnv // {
CONTAINER_MODE = "scheduler";
};
};
solidtimeQueue = {
hostname = "queue";
image = solidtimeImage;
imageFile = solidtimeImageFile;
inherit user;
autoStart = true;
networks = [
internalNetworkName
];
extraOptions = [
# Healthchecks
# test: [ "CMD", "healthcheck" ]
''--health-cmd="healthcheck"''
];
inherit volumes;
dependsOn = [
"solidtimeDb"
];
environmentFiles = [
"/home/admin/.solidtime.env"
];
environment = laravelEnv // {
CONTAINER_MODE = "worker";
WORKER_COMMAND = "php /var/www/html/artisan queue:work";
};
};
solidtimeDb = let
imageName = "postgres";
finalImageTag = "15";
in {
hostname = "database";
image = "${imageName}:${finalImageTag}";
imageFile = pkgs.dockerTools.pullImage {
inherit imageName finalImageTag;
imageDigest = "sha256:98fe06b500b5eb29e45bf8c073eb0ca399790ce17b1d586448edc4203627d342";
sha256 = "sha256-AZ4VkOlROX+nR/MjDjsA4xdHzmtKjiBAtsp2Q6IdOvg=";
};
autoStart = true;
ports = [
# "${toString dbExternalPort}:${toString dbInternalPort}"
];
networks = [
internalNetworkName
];
extraOptions = [
# Healthchecks
# test: - CMD - pg_isready - '-q' - '-d' - '${DB_DATABASE}' - '-U' - '${DB_USERNAME}' retries: 3 timeout: 5s
''--health-cmd="pg_isready -q -d ${laravelEnv.DB_DATABASE} -U ${laravelEnv.DB_USERNAME}"''
"--health-retries=3"
"--health-timeout=5s"
];
volumes = [
"solidtime-db:/var/lib/postgresql/data"
];
environment = {
PGPASSWORD = laravelEnv.DB_PASSWORD;
POSTGRES_DB = laravelEnv.DB_DATABASE;
POSTGRES_USER = laravelEnv.DB_USERNAME;
POSTGRES_PASSWORD = laravelEnv.DB_PASSWORD;
};
};
solidtimeGotenberg = let
imageName = "gotenberg/gotenberg";
finalImageTag = "8.26.0";
in {
hostname = "gotenberg";
image = "${imageName}:${finalImageTag}";
imageFile = pkgs.dockerTools.pullImage {
inherit imageName finalImageTag;
imageDigest = "sha256:328551506b3dec3ff6381dd47e5cd72a44def97506908269e201a8fbfa1c12c0";
sha256 = "sha256-1zz4xDAgXxHUnkCVIfjHTgXb82EFEx+5am6Cu9+eZj4=";
};
autoStart = true;
networks = [
internalNetworkName
];
extraOptions = [
# Healthchecks
# test: [ "CMD", "curl", "--silent", "--fail", "http://localhost:3000/health" ]
''--health-cmd="curl --silent --fail http://localhost:${toString gotenbergPort}/health"''
];
};
};
};
}

View file

@ -72,7 +72,7 @@ in {
# Certificates
"--certificatesresolvers.letsencrypt.acme.dnschallenge=true"
"--certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare"
"--certificatesresolvers.letsencrypt.acme.email=tibo.depeuter@telenet.be"
"--certificatesresolvers.letsencrypt.acme.email=${config.sops.placeholder.acme_email or "acme-email@example.com"}"
"--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json"
];
volumes = [

View file

@ -13,12 +13,12 @@ in {
description = "Vaultwarden WebUI port";
};
domain = lib.mkOption {
type = lib.types.string;
type = lib.types.str;
example = "https://vault.depeuter.dev";
description = "Domain to configure Vaultwarden on";
};
name = lib.mkOption {
type = lib.types.string;
type = lib.types.str;
example = "Hugo's Vault";
description = "Service name to use for invitations and mail";
};
@ -77,7 +77,7 @@ in {
dataDir = "/data";
in {
hostname = "vaultwarden";
image = "vaultwarden/server:1.34.3-alpine";
image = "vaultwarden/server:1.35.4-alpine";
autoStart = true;
ports = [
"${toString cfg.port}:80/tcp"
@ -344,6 +344,7 @@ in {
# ORG_CREATION_USERS=none
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
# TODO Hugo: Redact org creation users if needed.
## Invitations org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
@ -590,7 +591,7 @@ in {
## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
SMTP_HOST = "smtp.gmail.com";
SMTP_FROM = "vault@depeuter.dev";
SMTP_FROM = config.sops.placeholder.vaultwarden_smtp_from or "vaultwarden@example.com";
SMTP_FROM_NAME = cfg.name;
# SMTP_USERNAME=username
# SMTP_PASSWORD=password

View file

@ -1,4 +1,9 @@
{
imports = [
./networking.nix
./secrets.nix
];
config = {
homelab = {
services.openssh.enable = true;

View file

@ -0,0 +1,19 @@
{ config, lib, ... }:
let
  cfg = config.homelab.networking;
in
{
  # Declares networking metadata for a host; currently only the primary IP.
  options.homelab.networking.hostIp = lib.mkOption {
    type = lib.types.nullOr lib.types.str;
    default = null;
    description = ''
      The primary IP address of the host.
      Used for automated deployment and internal service discovery.
    '';
  };

  # Placeholder: when a hostIp is set, it could later drive interface or
  # firewall configuration automatically.
  config = lib.mkIf (cfg.hostIp != null) { };
}

View file

@ -0,0 +1,18 @@
{ config, lib, ... }:
{
sops.secrets = {
# -- User Public Keys (Anti-Fingerprinting) --
"user_keys_admin" = { neededForUsers = true; };
"user_keys_deploy" = { neededForUsers = true; };
"user_keys_backup" = { neededForUsers = true; };
# -- Infrastructure Metadata --
# Hugo TODO: Populate these in your .sops.yaml / secrets file
"acme_email" = {};
"cloudflare_dns_token" = {};
"pgadmin_email" = {};
"gitea_mailer_from" = {};
"vaultwarden_smtp_from" = {};
};
}

29
test/vm-test.nix Normal file
View file

@ -0,0 +1,29 @@
{ self, nixpkgs, ... }:
let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
in
pkgs.nixosTest {
name = "deploy-user-test";
nodes = {
machine = { ... }: {
imports = [
../modules
../users
];
homelab.users.deploy.enable = true;
system.stateVersion = "24.11"; # Match the current nixpkgs version
};
};
testScript = ''
machine.wait_for_unit("multi-user.target")
# Verify user exists
machine.succeed("id deploy")
# Verify we can run nix-env as deploy via sudo
machine.succeed("sudo -u deploy -n nix-env --version")
# Verify switch-to-configuration is accessible (it's added to path by the module)
machine.succeed("whereis switch-to-configuration")
'';
}

View file

@ -26,7 +26,9 @@ in {
config.users.groups.wheel.name # Enable 'sudo' for the user.
];
initialPassword = "ChangeMe";
openssh.authorizedKeys.keys = cfg.authorizedKeys;
openssh.authorizedKeys.keyFiles = [
config.sops.secrets.user_keys_admin.path
];
packages = with pkgs; [
curl
git

View file

@ -12,9 +12,8 @@ in {
extraGroups = [
"docker" # Allow access to the docker socket.
];
openssh.authorizedKeys.keys = [
# Hugo
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICms6vjhE9kOlqV5GBPGInwUHAfCSVHLI2Gtzee0VXPh"
openssh.authorizedKeys.keyFiles = [
config.sops.secrets.user_keys_backup.path
];
};
};

View file

@ -3,7 +3,19 @@
let
cfg = config.homelab.users.deploy;
in {
options.homelab.users.deploy.enable = lib.mkEnableOption "user Deploy";
options.homelab.users.deploy = {
enable = lib.mkEnableOption "user Deploy";
authorizedKeys = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
description = ''
Additional SSH public keys authorized for the deploy user.
The CI runner key should be provided as a base key; personal
workstation keys can be appended here per host or globally.
'';
};
};
config = lib.mkIf cfg.enable {
users = {
@ -15,12 +27,15 @@ in {
isSystemUser = true;
home = "/var/empty";
shell = pkgs.bashInteractive;
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPrG+ldRBdCeHEXrsy/qHXIJYg8xQXVuiUR0DxhFjYNg"
openssh.authorizedKeys.keyFiles = [
config.sops.secrets.user_keys_deploy.path
];
};
};
# Allow the deploy user to push closures to the nix store
nix.settings.trusted-users = [ "deploy" ];
security.sudo.extraRules = [
{
groups = [